Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
The class KRaftUtilsTest, method testInvalidKafka.
@ParallelTest
public void testInvalidKafka() {
KafkaSpec spec = new KafkaSpecBuilder()
        .withNewKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName("listener")
                    .withPort(9092)
                    .withTls(true)
                    .withType(KafkaListenerType.INTERNAL)
                    .withNewKafkaListenerAuthenticationScramSha512Auth()
                    .endKafkaListenerAuthenticationScramSha512Auth()
                    .build())
            .withNewJbodStorage()
                .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                        new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())
            .endJbodStorage()
            .withNewKafkaAuthorizationSimple()
            .endKafkaAuthorizationSimple()
        .endKafka()
        .build();
InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> KRaftUtils.validateKafkaCrForKRaft(spec));
assertThat(ex.getMessage(), is("Kafka configuration is not valid: [Authentication of type 'scram-sha-512' is currently not supported when the UseKRaft feature gate is enabled, Using more than one disk in a JBOD storage is currently not supported when the UseKRaft feature gate is enabled]"));
}
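For contrast, here is a minimal sketch (not from the test suite) of a spec this validation should accept, assuming the two blockers are exactly the ones named in the error message above: SCRAM-SHA-512 listener authentication and a multi-volume JBOD.
// Hypothetical counterpart: TLS client authentication and a single JBOD volume.
// Assumes only the two violations reported above block KRaft validation.
KafkaSpec validSpec = new KafkaSpecBuilder()
        .withNewKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName("listener")
                    .withPort(9092)
                    .withTls(true)
                    .withType(KafkaListenerType.INTERNAL)
                    .withNewKafkaListenerAuthenticationTlsAuth()
                    .endKafkaListenerAuthenticationTlsAuth()
                    .build())
            .withNewJbodStorage()
                .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build())
            .endJbodStorage()
        .endKafka()
        .build();
KRaftUtils.validateKafkaCrForKRaft(validSpec); // expected not to throw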
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
The class ResourceUtils, method createKafka.
public static Kafka createKafka(String namespace, String name, int replicas, String image, int healthDelay, int healthTimeout) {
Probe probe = new ProbeBuilder()
        .withInitialDelaySeconds(healthDelay)
        .withTimeoutSeconds(healthTimeout)
        .withFailureThreshold(10)
        .withSuccessThreshold(4)
        .withPeriodSeconds(33)
        .build();
ObjectMeta meta = new ObjectMetaBuilder()
        .withNamespace(namespace)
        .withName(name)
        .withLabels(Labels.fromMap(singletonMap("my-user-label", "cromulent")).toMap())
        .build();
return new KafkaBuilder()
        .withMetadata(meta)
        .withNewSpec()
            .withNewKafka()
                .withReplicas(replicas)
                .withImage(image)
                .withListeners(new GenericKafkaListenerBuilder()
                            .withName("plain")
                            .withPort(9092)
                            .withType(KafkaListenerType.INTERNAL)
                            .withTls(false)
                            .build(),
                        new GenericKafkaListenerBuilder()
                            .withName("tls")
                            .withPort(9093)
                            .withType(KafkaListenerType.INTERNAL)
                            .withTls(true)
                            .build())
                .withLivenessProbe(probe)
                .withReadinessProbe(probe)
                .withStorage(new EphemeralStorage())
            .endKafka()
            .withNewZookeeper()
                .withReplicas(replicas)
                .withImage(image + "-zk")
                .withLivenessProbe(probe)
                .withReadinessProbe(probe)
            .endZookeeper()
        .endSpec()
        .build();
}
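A hypothetical call site, with the argument values chosen purely for illustration:
// Illustrative only: namespace, name, replicas, image, and probe timings are sample values.
Kafka kafka = ResourceUtils.createKafka("my-namespace", "my-cluster", 3, "strimzi/kafka:latest", 120, 30);
assertThat(kafka.getSpec().getKafka().getReplicas(), is(3));
assertThat(kafka.getSpec().getKafka().getListeners().size(), is(2)); // "plain" and "tls"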
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
The class AbstractModelTest, method testCreatePersistentVolumeClaims.
@ParallelTest
public void testCreatePersistentVolumeClaims() {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName("my-cluster")
            .withNamespace("my-namespace")
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain")
                        .withPort(9092)
                        .withTls(false)
                        .withType(KafkaListenerType.INTERNAL)
                        .build())
                .withReplicas(2)
                .withNewEphemeralStorage()
                .endEphemeralStorage()
            .endKafka()
        .endSpec()
        .build();
KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup());
// JBOD Storage
Storage storage = new JbodStorageBuilder().withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build())
        .build();
List<PersistentVolumeClaim> pvcs = kc.generatePersistentVolumeClaims(storage);
assertThat(pvcs.size(), is(4));
assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));
assertThat(pvcs.get(2).getMetadata().getName(), is("data-1-my-cluster-kafka-0"));
assertThat(pvcs.get(3).getMetadata().getName(), is("data-1-my-cluster-kafka-1"));
// JBOD with Ephemeral storage
storage = new JbodStorageBuilder().withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
        new EphemeralStorageBuilder().withId(1).build())
        .build();
pvcs = kc.generatePersistentVolumeClaims(storage);
assertThat(pvcs.size(), is(2));
assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));
// Persistent Claim storage
storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build();
pvcs = kc.generatePersistentVolumeClaims(storage);
assertThat(pvcs.size(), is(2));
assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));
// Persistent Claim with ID storage
storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build();
pvcs = kc.generatePersistentVolumeClaims(storage);
assertThat(pvcs.size(), is(2));
assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));
// Ephemeral Storage
storage = new EphemeralStorageBuilder().build();
pvcs = kc.generatePersistentVolumeClaims(storage);
assertThat(pvcs.size(), is(0));
// JBOD Storage without ID
final Storage finalStorage = new JbodStorageBuilder()
        .withVolumes(new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build())
        .build();
InvalidResourceException ex = Assertions.assertThrows(InvalidResourceException.class, () -> kc.generatePersistentVolumeClaims(finalStorage));
assertThat(ex.getMessage(), is("The 'id' property is required for volumes in JBOD storage."));
}
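The assertions above encode the PVC naming scheme the test exercises: JBOD volumes produce data-<volumeId>-<cluster>-kafka-<broker>, while a single persistent claim produces data-<cluster>-kafka-<broker> (the id is ignored in that case, as the "Persistent Claim with ID" block shows). A minimal sketch of that scheme (the helper name is ours, not Strimzi's):
// Hypothetical helper mirroring the names asserted above; not part of Strimzi.
static String pvcName(String cluster, Integer volumeId, int broker) {
    String prefix = volumeId == null ? "data" : "data-" + volumeId;
    return prefix + "-" + cluster + "-kafka-" + broker;
}
pvcName("my-cluster", 0, 1);    // "data-0-my-cluster-kafka-1" (JBOD volume 0, broker 1)
pvcName("my-cluster", null, 0); // "data-my-cluster-kafka-0"   (plain persistent claim)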
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
The class KafkaListenerReconcilerIngressV1Beta1Test, method testIngressV1Beta1.
@Test
public void testIngressV1Beta1(VertxTestContext context) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName(NAME)
            .withNamespace(NAMESPACE)
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("ingress")
                        .withPort(9094)
                        .withTls(true)
                        .withType(KafkaListenerType.INGRESS)
                        .withNewConfiguration()
                            .withNewBootstrap()
                                .withHost("bootstrap.mydomain.tld")
                            .endBootstrap()
                            .withBrokers(new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withHost("broker-0.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(1).withHost("broker-1.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(2).withHost("broker-2.mydomain.tld").build())
                        .endConfiguration()
                        .build())
                .withNewEphemeralStorage()
                .endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage()
                .endEphemeralStorage()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewUserOperator()
                .endUserOperator()
                .withNewTopicOperator()
                .endTopicOperator()
            .endEntityOperator()
        .endSpec()
        .build();
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock ingress v1beta1 ops
IngressV1Beta1Operator mockIngressV1Beta1ops = supplier.ingressV1Beta1Operations;
ArgumentCaptor<io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress> ingressV1Beta1Captor = ArgumentCaptor.forClass(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class);
when(mockIngressV1Beta1ops.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressV1Beta1ops.reconcile(any(), anyString(), anyString(), ingressV1Beta1Captor.capture()))
        .thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress())));
when(mockIngressV1Beta1ops.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// Mock ingress v1 ops
IngressOperator mockIngressOps = supplier.ingressOperations;
ArgumentCaptor<Ingress> ingressCaptor = ArgumentCaptor.forClass(Ingress.class);
when(mockIngressOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressOps.reconcile(any(), anyString(), anyString(), ingressCaptor.capture()))
        .thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress())));
when(mockIngressOps.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
MockKafkaListenersReconciler reconciler = new MockKafkaListenersReconciler(
        new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME),
        kafkaCluster,
        new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16),
        supplier.secretOperations,
        supplier.serviceOperations,
        supplier.routeOperations,
        supplier.ingressOperations,
        supplier.ingressV1Beta1Operations);
Checkpoint async = context.checkpoint();
reconciler.reconcile().onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(ingressCaptor.getAllValues().size(), is(0));
assertThat(ingressV1Beta1Captor.getAllValues().size(), is(4));
verify(mockIngressOps, never()).list(any(), any());
verify(mockIngressOps, never()).reconcile(any(), any(), any(), any());
verify(mockIngressOps, never()).hasIngressAddress(any(), any(), any(), anyLong(), anyLong());
async.flag();
})));
}
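The four captured v1beta1 ingresses correspond to the bootstrap host plus the three per-broker hosts configured above; passing KubernetesVersion.V1_16 to PlatformFeaturesAvailability is what steers the reconciler onto the v1beta1 API, since networking.k8s.io/v1 Ingress is only served by newer Kubernetes versions. A sketch of how the mirror-image assertions might look against a version that serves v1 Ingress (the exact KubernetesVersion constant is an assumption):
// Hypothetical mirror of the verification above, assuming a Kubernetes version
// with v1 Ingress, e.g. new PlatformFeaturesAvailability(false, KubernetesVersion.V1_22):
assertThat(ingressCaptor.getAllValues().size(), is(4));        // v1 path used
assertThat(ingressV1Beta1Captor.getAllValues().size(), is(0)); // v1beta1 untouched
verify(mockIngressV1Beta1ops, never()).reconcile(any(), any(), any(), any());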
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
The class KafkaListenerReconcilerSkipBootstrapLoadBalancerTest, method testLoadBalancerSkipBootstrapService.
@Test
public void testLoadBalancerSkipBootstrapService(VertxTestContext context) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName(CLUSTER_NAME)
            .withNamespace(NAMESPACE)
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("external")
                        .withPort(LISTENER_PORT)
                        .withTls(true)
                        .withType(KafkaListenerType.LOADBALANCER)
                        .withNewConfiguration()
                            .withCreateBootstrapService(false)
                        .endConfiguration()
                        .build())
                .withNewEphemeralStorage()
                .endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage()
                .endEphemeralStorage()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewUserOperator()
                .endUserOperator()
                .withNewTopicOperator()
                .endTopicOperator()
            .endEntityOperator()
        .endSpec()
        .build();
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ResourceOperatorSupplier supplier = prepareResourceOperatorSupplier();
MockKafkaListenersReconciler reconciler = new MockKafkaListenersReconciler(
        new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
        kafkaCluster,
        new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16),
        supplier.secretOperations,
        supplier.serviceOperations,
        supplier.routeOperations,
        supplier.ingressOperations,
        supplier.ingressV1Beta1Operations);
Checkpoint async = context.checkpoint();
reconciler.reconcile().onComplete(context.succeeding(res -> context.verify(() -> {
// Check status
assertThat(res.listenerStatuses.size(), is(1));
ListenerStatus listenerStatus = res.listenerStatuses.get(0);
assertThat(listenerStatus.getBootstrapServers(), is("broker-0.test.dns.name:9094,broker-1.test.dns.name:9094,broker-2.test.dns.name:9094"));
assertThat(listenerStatus.getAddresses().size(), is(3));
assertThat(listenerStatus.getAddresses().get(0).getHost(), is(DNS_NAME_FOR_BROKER_0));
assertThat(listenerStatus.getAddresses().get(1).getHost(), is(DNS_NAME_FOR_BROKER_1));
assertThat(listenerStatus.getAddresses().get(2).getHost(), is(DNS_NAME_FOR_BROKER_2));
assertThat(listenerStatus.getAddresses().get(0).getPort(), is(LISTENER_PORT));
assertThat(listenerStatus.getAddresses().get(1).getPort(), is(LISTENER_PORT));
assertThat(listenerStatus.getAddresses().get(2).getPort(), is(LISTENER_PORT));
// Check creation of services
verify(supplier.serviceOperations, never()).reconcile(any(), eq(NAMESPACE), eq(CLUSTER_NAME + "-kafka-external-bootstrap"), notNull());
verify(supplier.serviceOperations, times(1)).reconcile(any(), eq(NAMESPACE), eq(CLUSTER_NAME + "-kafka-0"), notNull());
verify(supplier.serviceOperations, times(1)).reconcile(any(), eq(NAMESPACE), eq(CLUSTER_NAME + "-kafka-1"), notNull());
verify(supplier.serviceOperations, times(1)).reconcile(any(), eq(NAMESPACE), eq(CLUSTER_NAME + "-kafka-2"), notNull());
async.flag();
})));
}
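With createBootstrapService(false), only the per-broker load balancer services are reconciled and the listener status advertises the broker addresses directly. A hypothetical illustration (not Strimzi code) of how the asserted bootstrapServers string relates to the per-broker addresses:
// Illustrative only: joins each advertised host:port pair into the bootstrap string.
String bootstrapServers = listenerStatus.getAddresses().stream()
        .map(a -> a.getHost() + ":" + a.getPort())
        .collect(Collectors.joining(","));
// -> "broker-0.test.dns.name:9094,broker-1.test.dns.name:9094,broker-2.test.dns.name:9094"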