use of io.strimzi.api.kafka.model.KafkaBuilder in project strimzi by strimzi.
the class KafkaAssemblyOperatorIngressKafkaListenerTest method testIngressV1.
@Test
public void testIngressV1(VertxTestContext context) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName(NAME)
            .withNamespace(NAMESPACE)
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("ingress")
                        .withPort(9094)
                        .withTls(true)
                        .withType(KafkaListenerType.INGRESS)
                        .withNewConfiguration()
                            .withNewBootstrap()
                                .withHost("bootstrap.mydomain.tld")
                            .endBootstrap()
                            .withBrokers(new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withHost("broker-0.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(1).withHost("broker-1.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(2).withHost("broker-2.mydomain.tld").build())
                        .endConfiguration()
                        .build())
                .withNewEphemeralStorage().endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewUserOperator().endUserOperator()
                .withNewTopicOperator().endTopicOperator()
            .endEntityOperator()
        .endSpec()
        .build();
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the CRD Operator for Kafka resources
CrdOperator mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka));
when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafka);
when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture());
// Mock the StatefulSet operations
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(eq(NAMESPACE), eq(KafkaCluster.kafkaClusterName(NAME)))).thenReturn(Future.succeededFuture());
// Mock the StrimziPodSet operator
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
// Mock the Pod operations
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock ingress v1beta1 ops
IngressV1Beta1Operator mockIngressV1Beta1ops = supplier.ingressV1Beta1Operations;
ArgumentCaptor<io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress> ingressV1Beta1Captor = ArgumentCaptor.forClass(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class);
when(mockIngressV1Beta1ops.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressV1Beta1ops.reconcile(any(), anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress())));
when(mockIngressV1Beta1ops.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// Mock ingress v1 ops
IngressOperator mockIngressOps = supplier.ingressOperations;
ArgumentCaptor<Ingress> ingressCaptor = ArgumentCaptor.forClass(Ingress.class);
when(mockIngressOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressOps.reconcile(any(), anyString(), anyString(), ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress())));
when(mockIngressOps.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
KafkaAssemblyOperator op = new MockKafkaAssemblyOperatorForIngressTests(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_19), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(KafkaVersionTestUtils.getKafkaVersionLookup()));
Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);
Checkpoint async = context.checkpoint();
op.reconcile(reconciliation).onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(ingressCaptor.getAllValues().size(), is(4));
assertThat(ingressV1Beta1Captor.getAllValues().size(), is(0));
verify(mockIngressV1Beta1ops, never()).list(any(), any());
verify(mockIngressV1Beta1ops, never()).reconcile(any(), any(), any(), any());
verify(mockIngressV1Beta1ops, never()).hasIngressAddress(any(), any(), any(), anyLong(), anyLong());
async.flag();
})));
}
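The reconciliation above is expected to produce four networking.k8s.io/v1 Ingress resources: one for the bootstrap host plus one per broker. As a purely illustrative extension (not part of the original test), the verification block could also inspect the hosts captured by ingressCaptor; the expected host names below simply mirror the listener configuration at the top of the test:
// Illustrative only: check the hosts of the captured v1 Ingress resources
Set<String> capturedHosts = ingressCaptor.getAllValues().stream()
        .map(ingress -> ingress.getSpec().getRules().get(0).getHost())
        .collect(Collectors.toSet());
assertThat(capturedHosts, is(Set.of("bootstrap.mydomain.tld", "broker-0.mydomain.tld", "broker-1.mydomain.tld", "broker-2.mydomain.tld")));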
use of io.strimzi.api.kafka.model.KafkaBuilder in project strimzi by strimzi.
the class KafkaAssemblyOperatorManualRollingUpdatesTest method manualPodCleanup.
public void manualPodCleanup(VertxTestContext context, boolean useStrimziPodSets) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata()
            .withName(clusterName)
            .withNamespace(namespace)
            .withGeneration(2L)
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain")
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .build())
                .withNewJbodStorage()
                    .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                            new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())
                .endJbodStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewPersistentClaimStorage().withSize("100Gi").endPersistentClaimStorage()
            .endZookeeper()
        .endSpec()
        .build();
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
if (useStrimziPodSets) {
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, null)));
when(mockPodSetOps.deleteAsync(any(), any(), eq(zkCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getName()), any())).thenReturn(Future.succeededFuture());
when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(kafkaCluster.generatePodSet(kafka.getSpec().getKafka().getReplicas(), false, null, null, null)));
when(mockPodSetOps.deleteAsync(any(), any(), eq(kafkaCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getName()), any())).thenReturn(Future.succeededFuture());
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
} else {
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(zkCluster.generateStatefulSet(false, null, null)));
when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(kafkaCluster.generateStatefulSet(false, null, null, null)));
when(mockStsOps.deleteAsync(any(), any(), eq(zkCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockStsOps.deleteAsync(any(), any(), eq(kafkaCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockStsOps.reconcile(any(), any(), eq(zkCluster.getName()), any())).thenReturn(Future.succeededFuture());
when(mockStsOps.reconcile(any(), any(), eq(kafkaCluster.getName()), any())).thenReturn(Future.succeededFuture());
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
}
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenAnswer(i -> {
List<Pod> pods = new ArrayList<>();
pods.add(podWithName("my-cluster-zookeeper-0"));
pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-1", Collections.singletonMap(AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC, "true")));
pods.add(podWithName("my-cluster-zookeeper-2"));
return Future.succeededFuture(pods);
});
when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> {
List<Pod> pods = new ArrayList<>();
pods.add(podWithNameAndAnnotations("my-cluster-kafka-0", Collections.singletonMap(AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC, "true")));
pods.add(podWithName("my-cluster-kafka-1"));
pods.add(podWithName("my-cluster-kafka-2"));
return Future.succeededFuture(pods);
});
when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
PvcOperator mockPvcOps = supplier.pvcOperations;
when(mockPvcOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(zkCluster.generatePersistentVolumeClaims()));
when(mockPvcOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(kafkaCluster.generatePersistentVolumeClaims(kafkaCluster.getStorage())));
CrdOperator mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka));
when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka);
when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());
ClusterOperatorConfig config;
if (useStrimziPodSets) {
config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
} else {
config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS);
}
MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, passwordGenerator, supplier, config);
Checkpoint async = context.checkpoint();
kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify which Pods and PVCs were cleaned up and which PVCs were re-created
assertThat(kao.cleanedPodsCount, is(2));
assertThat(kao.cleanedPods, Matchers.containsInAnyOrder("my-cluster-zookeeper-1", "my-cluster-kafka-0"));
assertThat(kao.cleanedPvcs, Matchers.containsInAnyOrder("data-my-cluster-zookeeper-1", "data-0-my-cluster-kafka-0", "data-1-my-cluster-kafka-0"));
assertThat(kao.createdPvcs, Matchers.containsInAnyOrder("data-my-cluster-zookeeper-1", "data-0-my-cluster-kafka-0", "data-1-my-cluster-kafka-0"));
async.flag();
})));
}
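The podWithName and podWithNameAndAnnotations helpers used above are defined elsewhere in the test class and are not part of this excerpt. A minimal sketch of what they could look like, built with the fabric8 PodBuilder (the namespace field and the exact shape of the real helpers are assumptions):
// Hypothetical helpers: build a bare Pod with a name and optional annotations
private Pod podWithName(String name) {
    return podWithNameAndAnnotations(name, Collections.emptyMap());
}

private Pod podWithNameAndAnnotations(String name, Map<String, String> annotations) {
    return new PodBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(namespace)
                .withAnnotations(annotations)
            .endMetadata()
            .build();
}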
use of io.strimzi.api.kafka.model.KafkaBuilder in project strimzi by strimzi.
the class JbodStorageTest method init.
@BeforeEach
void init() {
this.volumes = new ArrayList<>(2);
volumes.add(new PersistentClaimStorageBuilder().withId(0).withDeleteClaim(true).withSize("100Gi").build());
volumes.add(new PersistentClaimStorageBuilder().withId(1).withDeleteClaim(false).withSize("100Gi").build());
this.kafka = new KafkaBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(NAME)
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain")
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .build())
                .withNewJbodStorage()
                    .withVolumes(volumes)
                .endJbodStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(1)
            .endZookeeper()
        .endSpec()
        .build();
// setting up the Kafka CRD
CustomResourceDefinition kafkaAssemblyCrd = Crds.kafka();
// setting up a mock Kubernetes client
this.mockClient = new MockKube().withCustomResourceDefinition(kafkaAssemblyCrd, Kafka.class, KafkaList.class).end().withCustomResourceDefinition(Crds.strimziPodSet(), StrimziPodSet.class, StrimziPodSetList.class).end().build();
// initialize a Kafka in MockKube
Crds.kafkaOperation(this.mockClient).inNamespace(NAMESPACE).withName(NAME).create(this.kafka);
PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16);
// creating the Kafka operator
ResourceOperatorSupplier ros = new ResourceOperatorSupplier(this.vertx, this.mockClient, ResourceUtils.zookeeperLeaderFinder(this.vertx, this.mockClient), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.metricsProvider(), pfa, FeatureGates.NONE, 60_000L);
this.operator = new KafkaAssemblyOperator(this.vertx, pfa, new MockCertManager(), new PasswordGenerator(10, "a", "a"), ros, ResourceUtils.dummyClusterOperatorConfig(VERSIONS, 2_000));
}
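The init() method references fields and constants (volumes, kafka, mockClient, operator, vertx, NAMESPACE, NAME, VERSIONS) declared elsewhere in JbodStorageTest. A plausible sketch of those declarations follows; the concrete string values are illustrative assumptions, not the project's actual constants:
// Assumed field declarations; names follow the snippet above, values are illustrative
private static final String NAMESPACE = "test-jbod-storage";
private static final String NAME = "my-kafka";
private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup();

private static Vertx vertx;
private List<SingleVolumeStorage> volumes;
private Kafka kafka;
private KubernetesClient mockClient;
private KafkaAssemblyOperator operator;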
use of io.strimzi.api.kafka.model.KafkaBuilder in project strimzi by strimzi.
the class JbodStorageTest method testReconcileWithNewVolumeAddedToJbodStorage.
@Test
public void testReconcileWithNewVolumeAddedToJbodStorage(VertxTestContext context) {
Checkpoint async = context.checkpoint();
// Add a new volume to Jbod Storage
volumes.add(new PersistentClaimStorageBuilder().withId(2).withDeleteClaim(false).withSize("100Gi").build());
Kafka kafkaWithNewJbodVolume = new KafkaBuilder(kafka)
        .editSpec()
            .editKafka()
                .withStorage(new JbodStorageBuilder().withVolumes(volumes).build())
            .endKafka()
        .endSpec()
        .build();
Set<String> expectedPvcs = expectedPvcs(kafka);
Set<String> expectedPvcsWithNewJbodStorageVolume = expectedPvcs(kafkaWithNewJbodVolume);
// reconcile for kafka cluster creation
operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
assertThat(pvcsNames, is(expectedPvcs));
}))).compose(v -> {
Crds.kafkaOperation(mockClient).inNamespace(NAMESPACE).withName(NAME).patch(kafkaWithNewJbodVolume);
// reconcile kafka cluster with new Jbod storage
return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME));
}).onComplete(context.succeeding(v -> context.verify(() -> {
List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
assertThat(pvcsNames, is(expectedPvcsWithNewJbodStorageVolume));
async.flag();
})));
}
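The expectedPvcs and getPvcs helpers used by both JBOD tests are not shown in this excerpt. Assuming the usual Strimzi PVC naming scheme data-<volumeId>-<clusterName>-kafka-<podIndex>, a rough sketch of the helpers might look like this (the real implementations in the test class may differ):
// Hypothetical helper: derive the expected PVC names from the JBOD volumes in the Kafka CR
private Set<String> expectedPvcs(Kafka kafka) {
    Set<String> expected = new HashSet<>();
    JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
    for (int podIndex = 0; podIndex < kafka.getSpec().getKafka().getReplicas(); podIndex++) {
        for (SingleVolumeStorage volume : storage.getVolumes()) {
            expected.add("data-" + volume.getId() + "-" + NAME + "-kafka-" + podIndex);
        }
    }
    return expected;
}

// Hypothetical helper: list the PVCs that MockKube currently holds for the Kafka pods
private List<PersistentVolumeClaim> getPvcs(String namespace, String clusterName) {
    Labels pvcSelector = Labels.forStrimziCluster(clusterName)
            .withStrimziKind(Kafka.RESOURCE_KIND)
            .withStrimziName(KafkaCluster.kafkaClusterName(clusterName));
    return mockClient.persistentVolumeClaims()
            .inNamespace(namespace)
            .withLabels(pvcSelector.toMap())
            .list()
            .getItems();
}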
use of io.strimzi.api.kafka.model.KafkaBuilder in project strimzi by strimzi.
the class JbodStorageTest method testReconcileWithUpdateVolumeIdJbod.
@Test
public void testReconcileWithUpdateVolumeIdJbod(VertxTestContext context) {
Checkpoint async = context.checkpoint();
// try to update the id of one of the volumes in the JBOD storage
volumes.get(0).setId(3);
Kafka kafkaWithUpdatedJbodVolume = new KafkaBuilder(this.kafka)
        .editSpec()
            .editKafka()
                .withStorage(new JbodStorageBuilder().withVolumes(volumes).build())
            .endKafka()
        .endSpec()
        .build();
Set<String> expectedPvcs = expectedPvcs(kafka);
Set<String> expectedPvcsWithUpdatedJbodStorageVolume = expectedPvcs(kafkaWithUpdatedJbodVolume);
// reconcile for kafka cluster creation
operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
assertThat(pvcsNames, is(expectedPvcs));
}))).compose(v -> {
Crds.kafkaOperation(mockClient).inNamespace(NAMESPACE).withName(NAME).patch(kafkaWithUpdatedJbodVolume);
// reconcile the Kafka cluster with the updated JBOD volume id
return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME));
}).onComplete(context.succeeding(v -> context.verify(() -> {
List<PersistentVolumeClaim> pvcs = getPvcs(NAMESPACE, NAME);
Set<String> pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet());
assertThat(pvcsNames, is(expectedPvcsWithUpdatedJbodStorageVolume));
async.flag();
})));
}
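Under the same PVC naming assumption as the sketch above, a hypothetical extra check after the second reconciliation could pin down the re-numbered volume explicitly, for example:
// Illustrative only: the volume whose id changed from 0 to 3 should now be backed by a data-3-* PVC
assertThat(pvcsNames, Matchers.hasItem("data-3-" + NAME + "-kafka-0"));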