Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaAssemblyOperatorPodSetTest, method testFirstReconciliation.
/**
* Tests the first reconciliation of the Kafka cluster after the UseStrimziPodSets feature gate has been enabled for the first time
*
* @param context Test context
*/
@Test
public void testFirstReconciliation(VertxTestContext context) {
ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, null);
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
StrimziPodSet kafkaPodSet = kafkaCluster.generatePodSet(KAFKA.getSpec().getKafka().getReplicas(), false, null, null, brokerId -> null);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
SecretOperator secretOps = supplier.secretOperations;
when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
ConfigMapOperator mockCmOps = supplier.configMapOperations;
when(mockCmOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(kafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS, true)));
ArgumentCaptor<String> cmReconciliationCaptor = ArgumentCaptor.forClass(String.class);
when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class);
when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
// The ZooKeeper PodSet does not exist yet in the first reconciliation
when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(null));
when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(zkPodSet)));
// The Kafka PodSet does not exist yet in the first reconciliation
when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(null));
when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.noop(kafkaPodSet)));
StatefulSetOperator mockStsOps = supplier.stsOperations;
// Zoo STS still exists in the first reconciliation
when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(zkCluster.generateStatefulSet(false, null, null)));
// The Zoo STS will be deleted during the reconciliation
when(mockStsOps.deleteAsync(any(), any(), eq(zkCluster.getName()), eq(false))).thenReturn(Future.succeededFuture());
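// The Kafka STS also still exists in the first reconciliation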
when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(kafkaCluster.generateStatefulSet(false, null, null, null)));
// The Kafka STS will be deleted during the reconciliation
when(mockStsOps.deleteAsync(any(), any(), eq(kafkaCluster.getName()), eq(false))).thenReturn(Future.succeededFuture());
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList()));
CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA));
when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA);
when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());
ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
MockZooKeeperReconciler zr = new MockZooKeeperReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 0, CLUSTER_CA);
MockKafkaReconciler kr = new MockKafkaReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 0, CLUSTER_CA, CLIENTS_CA);
MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), CERT_MANAGER, PASSWORD_GENERATOR, supplier, config, zr, kr);
Checkpoint async = context.checkpoint();
kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
    // Test that the old Zoo STS was deleted
    verify(mockStsOps, times(1)).deleteAsync(any(), any(), eq(zkCluster.getName()), eq(false));
    assertThat(zr.maybeRollZooKeeperInvocations, is(1));
    assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), is(List.of()));
    assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-1")), is(List.of()));
    assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-2")), is(List.of()));
    assertThat(kr.maybeRollKafkaInvocations, is(1));
    assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-0")), is(List.of()));
    assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-1")), is(List.of()));
    assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-2")), is(List.of()));
    assertThat(cmReconciliationCaptor.getAllValues().size(), is(3));
    assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2")));
    assertThat(cmDeletionCaptor.getAllValues().size(), is(1));
    assertThat(cmDeletionCaptor.getAllValues().get(0), is("my-cluster-kafka-config"));
    async.flag();
})));
}
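The assertions above rely on a podFromPodSet helper that is not shown in this excerpt. A minimal sketch of such a lookup, assuming the same PodSetUtils.mapsToPods conversion used in the PodSet tests below, might look like this:
private static Pod podFromPodSet(StrimziPodSet podSet, String podName) {
    // Convert the Map-encoded pods stored in the PodSet spec back into Pod objects
    // and return the one with the requested name, or null if it is not present
    return PodSetUtils.mapsToPods(podSet.getSpec().getPods()).stream()
            .filter(pod -> podName.equals(pod.getMetadata().getName()))
            .findFirst()
            .orElse(null);
}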
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class ZookeeperPodSetTest, method testImagePullSecretsFromCO.
@ParallelTest
public void testImagePullSecretsFromCO() {
LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
List<LocalObjectReference> secrets = new ArrayList<>(2);
secrets.add(secret1);
secrets.add(secret2);
ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
StrimziPodSet ps = zc.generatePodSet(3, true, null, secrets, Map.of());
// We need to loop through the pods to make sure they have the right values
List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
for (Pod pod : pods) {
    assertThat(pod.getSpec().getImagePullSecrets().size(), is(2));
    assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true));
    assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
}
}
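For context, the operator-level pull secrets passed to generatePodSet() are just a list of secret names. A minimal sketch of turning such a setting into the List<LocalObjectReference> used above (the comma-separated format is an assumption for illustration, not taken from this excerpt):
// Hypothetical parsing of Cluster Operator level pull secrets; only the resulting
// List<LocalObjectReference> matches what generatePodSet() expects as its secrets argument
String configured = "some-pull-secret,some-other-pull-secret";
List<LocalObjectReference> coPullSecrets = new ArrayList<>();
for (String name : configured.split(",")) {
    if (!name.isBlank()) {
        coPullSecrets.add(new LocalObjectReference(name.trim()));
    }
}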
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class ZookeeperPodSetTest, method testImagePullSecretsFromBoth.
@ParallelTest
public void testImagePullSecretsFromBoth() {
// CR configuration has priority -> CO configuration is ignored if both are set
LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
Kafka kafka = new KafkaBuilder(KAFKA).editSpec().editZookeeper().withNewTemplate().withNewPod().withImagePullSecrets(secret2).endPod().endTemplate().endZookeeper().endSpec().build();
ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
StrimziPodSet ps = zc.generatePodSet(3, true, null, List.of(secret1), Map.of());
// We need to loop through the pods to make sure they have the right values
List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
for (Pod pod : pods) {
    assertThat(pod.getSpec().getImagePullSecrets().size(), is(1));
    assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(false));
    assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
}
}
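An illustrative sketch of the precedence rule this test pins down (not the operator's actual implementation): pull secrets from the Kafka CR pod template win, and the Cluster Operator level secrets are only a fallback.
// Illustrative only - not copied from ZookeeperCluster
static List<LocalObjectReference> effectivePullSecrets(List<LocalObjectReference> templateSecrets, List<LocalObjectReference> operatorSecrets) {
    // Secrets from the CR pod template take priority; otherwise fall back to the operator-level list
    return templateSecrets != null && !templateSecrets.isEmpty() ? templateSecrets : operatorSecrets;
}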
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class ZookeeperPodSetTest, method testPodSet.
@ParallelTest
public void testPodSet() {
ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
StrimziPodSet ps = zc.generatePodSet(3, true, null, null, Map.of());
assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER)));
assertThat(ps.getMetadata().getLabels().entrySet().containsAll(zc.getLabelsWithStrimziName(zc.getName(), null).toMap().entrySet()), is(true));
assertThat(ps.getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), is(ModelUtils.encodeStorageToJson(new PersistentClaimStorageBuilder().withSize("100Gi").withDeleteClaim(false).build())));
assertThat(ps.getMetadata().getOwnerReferences().size(), is(1));
assertThat(ps.getMetadata().getOwnerReferences().get(0), is(zc.createOwnerReference()));
assertThat(ps.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap()));
assertThat(ps.getSpec().getPods().size(), is(3));
// We need to loop through the pods to make sure they have the right values
List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
for (Pod pod : pods) {
    assertThat(pod.getMetadata().getLabels().entrySet().containsAll(zc.getLabelsWithStrimziNameAndPodName(zc.getName(), pod.getMetadata().getName(), null).withStatefulSetPod(pod.getMetadata().getName()).withStrimziPodSetController(zc.getName()).toMap().entrySet()), is(true));
    assertThat(pod.getMetadata().getAnnotations().size(), is(1));
    assertThat(pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION), is(notNullValue()));
    assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName()));
    assertThat(pod.getSpec().getSubdomain(), is(zc.getHeadlessServiceName()));
    assertThat(pod.getSpec().getRestartPolicy(), is("Always"));
    assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L));
    assertThat(pod.getSpec().getVolumes().stream().filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")).findFirst().orElse(null).getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));
    assertThat(pod.getSpec().getContainers().size(), is(1));
    assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(5));
    assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(15));
    assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(5));
    assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(15));
    assertThat(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.VOLUME_NAME));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/var/lib/zookeeper"));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is("zookeeper-metrics-and-logging"));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is("/opt/kafka/custom-config/"));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME));
    assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT));
    // Config
    OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS);
    OrderedProperties actual = new OrderedProperties().addStringPairs(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION));
    assertThat(actual, is(expectedConfig));
}
}
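The hostname and subdomain assertions matter because, together with the headless service, they give every ZooKeeper pod a stable DNS identity. A small sketch of how such a name is composed (the .svc suffix assumes the default Kubernetes cluster-domain handling):
// Minimal sketch: with hostname = pod name and subdomain = headless service name,
// each pod is addressable as <pod>.<headless-service>.<namespace>.svc(.cluster.local)
static String podDnsName(String podName, String headlessServiceName, String namespace) {
    return String.format("%s.%s.%s.svc", podName, headlessServiceName, namespace);
}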
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class StrimziPodSetCrdOperatorIT, method testUpdateStatusAfterResourceDeletedThrowsKubernetesClientException.
/**
* Tests what happens when the resource is deleted while updating the status
*
* @param context Test context
*/
@Test
public void testUpdateStatusAfterResourceDeletedThrowsKubernetesClientException(VertxTestContext context) {
String resourceName = getResourceName(RESOURCE_NAME);
Checkpoint async = context.checkpoint();
String namespace = getNamespace();
StrimziPodSetOperator op = operator();
// Required to be able to create the resource
readinessHelper(op, namespace, resourceName);
AtomicReference<StrimziPodSet> newStatus = new AtomicReference<>();
LOGGER.info("Creating resource");
op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName)).onComplete(context.succeedingThenComplete()).compose(rr -> {
LOGGER.info("Saving resource with status change prior to deletion");
newStatus.set(getResourceWithNewReadyStatus(op.get(namespace, resourceName)));
LOGGER.info("Deleting resource");
return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null);
}).onComplete(context.succeedingThenComplete()).compose(i -> {
LOGGER.info("Wait for confirmed deletion");
return op.waitFor(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, 100L, 10_000L, (n, ns) -> operator().get(namespace, resourceName) == null);
}).compose(i -> {
LOGGER.info("Updating resource with new status - should fail");
return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus.get());
}).onComplete(context.failing(e -> context.verify(() -> {
assertThat(e, instanceOf(KubernetesClientException.class));
async.flag();
})));
}
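As a usage note, a caller of updateStatusAsync() might want to recognise this failure mode explicitly. A hedged sketch of such handling (the 404 check is an assumption about how the API server reports the missing resource; only the Fabric8 KubernetesClientException.getCode() accessor and the Vert.x Future API are relied on):
// Illustrative handling only: distinguish "resource was deleted under us" from other failures
op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus.get()).onComplete(res -> {
    if (res.failed() && res.cause() instanceof KubernetesClientException
            && ((KubernetesClientException) res.cause()).getCode() == 404) {
        LOGGER.info("StrimziPodSet was already deleted, so there is no status left to update");
    }
});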