Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaPodSetTest, method testPodSet.
@ParallelTest
public void testPodSet() {
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
    StrimziPodSet ps = kc.generatePodSet(3, true, null, null, brokerId -> Map.of("test-anno", kc.getPodName(brokerId)));

    assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER)));
    assertThat(ps.getMetadata().getLabels().entrySet().containsAll(kc.getLabelsWithStrimziName(kc.getName(), null).toMap().entrySet()), is(true));
    assertThat(ps.getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE),
            is(ModelUtils.encodeStorageToJson(new JbodStorageBuilder()
                    .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withDeleteClaim(false).build())
                    .build())));
    assertThat(ps.getMetadata().getOwnerReferences().size(), is(1));
    assertThat(ps.getMetadata().getOwnerReferences().get(0), is(kc.createOwnerReference()));
    assertThat(ps.getSpec().getSelector().getMatchLabels(), is(kc.getSelectorLabels().toMap()));
    assertThat(ps.getSpec().getPods().size(), is(3));

    // We need to loop through the pods to make sure they have the right values
    List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
    for (Pod pod : pods) {
        assertThat(pod.getMetadata().getLabels().entrySet().containsAll(
                kc.getLabelsWithStrimziNameAndPodName(kc.getName(), pod.getMetadata().getName(), null)
                        .withStatefulSetPod(pod.getMetadata().getName())
                        .withStrimziPodSetController(kc.getName())
                        .toMap().entrySet()), is(true));
        assertThat(pod.getMetadata().getAnnotations().size(), is(2));
        assertThat(pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION), is(notNullValue()));
        assertThat(pod.getMetadata().getAnnotations().get("test-anno"), is(pod.getMetadata().getName()));

        assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName()));
        assertThat(pod.getSpec().getSubdomain(), is(kc.getHeadlessServiceName()));
        assertThat(pod.getSpec().getRestartPolicy(), is("Always"));
        assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L));
        assertThat(pod.getSpec().getVolumes().stream()
                .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
                .findFirst().orElse(null)
                .getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));

        assertThat(pod.getSpec().getContainers().size(), is(1));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(5));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(15));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(5));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(15));
        assertThat(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(AbstractModel.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is("data-0"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is("/var/lib/kafka/data-0"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getName(), is("kafka-metrics-and-logging"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getMountPath(), is("/opt/kafka/custom-config/"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getName(), is("ready-files"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getMountPath(), is("/var/opt/kafka"));

        assertThat(pod.getSpec().getVolumes().size(), is(7));
        assertThat(pod.getSpec().getVolumes().get(0).getName(), is("data-0"));
        assertThat(pod.getSpec().getVolumes().get(0).getPersistentVolumeClaim(), is(notNullValue()));
        assertThat(pod.getSpec().getVolumes().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
        assertThat(pod.getSpec().getVolumes().get(1).getEmptyDir(), is(notNullValue()));
        assertThat(pod.getSpec().getVolumes().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
        assertThat(pod.getSpec().getVolumes().get(2).getSecret().getSecretName(), is("my-cluster-cluster-ca-cert"));
        assertThat(pod.getSpec().getVolumes().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
        assertThat(pod.getSpec().getVolumes().get(3).getSecret().getSecretName(), is("my-cluster-kafka-brokers"));
        assertThat(pod.getSpec().getVolumes().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
        assertThat(pod.getSpec().getVolumes().get(4).getSecret().getSecretName(), is("my-cluster-clients-ca-cert"));
        assertThat(pod.getSpec().getVolumes().get(5).getName(), is("kafka-metrics-and-logging"));
        assertThat(pod.getSpec().getVolumes().get(5).getConfigMap().getName(), is(pod.getMetadata().getName()));
        assertThat(pod.getSpec().getVolumes().get(6).getName(), is("ready-files"));
        assertThat(pod.getSpec().getVolumes().get(6).getEmptyDir(), is(notNullValue()));
    }
}
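One detail worth calling out from the assertions above: a StrimziPodSet does not embed typed Pod objects in its spec but generic Map<String, Object> structures, which is why the test converts them back with PodSetUtils.mapsToPods before asserting on them. Below is a minimal sketch of building such a resource by hand; StrimziPodSetBuilder is the generated fluent builder for the CRD model, and PodSetUtils.podsToMaps is assumed here as the inverse of the mapsToPods helper used in the test.

import io.fabric8.kubernetes.api.model.LabelSelectorBuilder;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;
import io.strimzi.api.kafka.model.StrimziPodSet;
import io.strimzi.api.kafka.model.StrimziPodSetBuilder;
import io.strimzi.operator.cluster.model.PodSetUtils;

import java.util.List;
import java.util.Map;

public class PodSetSketch {
    public static StrimziPodSet minimalPodSet() {
        // A single pod; in the operator this would come from KafkaCluster.generatePodSet()
        Pod pod = new PodBuilder()
                .withNewMetadata()
                    .withName("my-cluster-kafka-0")
                    .withLabels(Map.of("strimzi.io/cluster", "my-cluster"))
                .endMetadata()
                .build();

        return new StrimziPodSetBuilder()
                .withNewMetadata()
                    .withName("my-cluster-kafka")
                .endMetadata()
                .withNewSpec()
                    .withSelector(new LabelSelectorBuilder()
                            .withMatchLabels(Map.of("strimzi.io/cluster", "my-cluster"))
                            .build())
                    // Pods are serialized into the spec as maps, not typed Pod objects
                    .withPods(PodSetUtils.podsToMaps(List.of(pod)))
                .endSpec()
                .build();
    }
}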
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaPodSetTest, method testCustomizedPodSet.
@SuppressWarnings({ "checkstyle:MethodLength" })
@ParallelTest
public void testCustomizedPodSet() {
    // Prepare various template values
    Map<String, String> spsLabels = TestUtils.map("l1", "v1", "l2", "v2");
    Map<String, String> spsAnnos = TestUtils.map("a1", "v1", "a2", "v2");
    Map<String, String> podLabels = TestUtils.map("l3", "v3", "l4", "v4");
    Map<String, String> podAnnos = TestUtils.map("a3", "v3", "a4", "v4");

    HostAlias hostAlias1 = new HostAliasBuilder()
            .withHostnames("my-host-1", "my-host-2")
            .withIp("192.168.1.86")
            .build();
    HostAlias hostAlias2 = new HostAliasBuilder()
            .withHostnames("my-host-3")
            .withIp("192.168.1.87")
            .build();

    TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder()
            .withTopologyKey("kubernetes.io/zone")
            .withMaxSkew(1)
            .withWhenUnsatisfiable("DoNotSchedule")
            .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build())
            .build();
    TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder()
            .withTopologyKey("kubernetes.io/hostname")
            .withMaxSkew(2)
            .withWhenUnsatisfiable("ScheduleAnyway")
            .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build())
            .build();

    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");

    Affinity affinity = new AffinityBuilder()
            .withNewNodeAffinity()
                .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                            .addNewMatchExpression()
                                .withKey("key1")
                                .withOperator("In")
                                .withValues("value1", "value2")
                            .endMatchExpression()
                            .build())
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endNodeAffinity()
            .build();

    List<Toleration> toleration = singletonList(new TolerationBuilder()
            .withEffect("NoExecute")
            .withKey("key1")
            .withOperator("Equal")
            .withValue("value1")
            .build());

    ContainerEnvVar envVar1 = new ContainerEnvVar();
    String testEnvOneKey = "TEST_ENV_1";
    String testEnvOneValue = "test.env.one";
    envVar1.setName(testEnvOneKey);
    envVar1.setValue(testEnvOneValue);

    ContainerEnvVar envVar2 = new ContainerEnvVar();
    String testEnvTwoKey = "TEST_ENV_2";
    String testEnvTwoValue = "test.env.two";
    envVar2.setName(testEnvTwoKey);
    envVar2.setValue(testEnvTwoValue);

    SecurityContext securityContext = new SecurityContextBuilder()
            .withPrivileged(false)
            .withReadOnlyRootFilesystem(false)
            .withAllowPrivilegeEscalation(false)
            .withRunAsNonRoot(true)
            .withNewCapabilities()
                .addNewDrop("ALL")
            .endCapabilities()
            .build();

    String image = "my-custom-image:latest";

    Probe livenessProbe = new Probe();
    livenessProbe.setInitialDelaySeconds(1);
    livenessProbe.setTimeoutSeconds(2);
    livenessProbe.setSuccessThreshold(3);
    livenessProbe.setFailureThreshold(4);
    livenessProbe.setPeriodSeconds(5);

    Probe readinessProbe = new Probe();
    readinessProbe.setInitialDelaySeconds(6);
    readinessProbe.setTimeoutSeconds(7);
    readinessProbe.setSuccessThreshold(8);
    readinessProbe.setFailureThreshold(9);
    readinessProbe.setPeriodSeconds(10);

    // Use the template values in the Kafka CR
    Kafka kafka = new KafkaBuilder(KAFKA)
            .editSpec()
                .editKafka()
                    .withImage(image)
                    .withNewJvmOptions()
                        .withGcLoggingEnabled(true)
                    .endJvmOptions()
                    .withReadinessProbe(readinessProbe)
                    .withLivenessProbe(livenessProbe)
                    .withConfig(Map.of("foo", "bar"))
                    .withNewTemplate()
                        .withNewPodSet()
                            .withNewMetadata()
                                .withLabels(spsLabels)
                                .withAnnotations(spsAnnos)
                            .endMetadata()
                        .endPodSet()
                        .withNewPod()
                            .withNewMetadata()
                                .withLabels(podLabels)
                                .withAnnotations(podAnnos)
                            .endMetadata()
                            .withPriorityClassName("top-priority")
                            .withSchedulerName("my-scheduler")
                            .withHostAliases(hostAlias1, hostAlias2)
                            .withTopologySpreadConstraints(tsc1, tsc2)
                            .withAffinity(affinity)
                            .withTolerations(toleration)
                            .withEnableServiceLinks(false)
                            .withTmpDirSizeLimit("10Mi")
                            .withTerminationGracePeriodSeconds(123)
                            .withImagePullSecrets(secret1, secret2)
                            .withSecurityContext(new PodSecurityContextBuilder().withFsGroup(123L).withRunAsGroup(456L).withRunAsUser(789L).build())
                        .endPod()
                        .withNewKafkaContainer()
                            .withEnv(envVar1, envVar2)
                            .withSecurityContext(securityContext)
                        .endKafkaContainer()
                    .endTemplate()
                .endKafka()
            .endSpec()
            .build();

    // Test the resources
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    StrimziPodSet ps = kc.generatePodSet(3, true, null, null, brokerId -> Map.of("special", "annotation"));

    assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER)));
    assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true));
    assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnos.entrySet()), is(true));
    assertThat(ps.getSpec().getSelector().getMatchLabels(), is(kc.getSelectorLabels().toMap()));
    assertThat(ps.getSpec().getPods().size(), is(3));

    // We need to loop through the pods to make sure they have the right values
    List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
    for (Pod pod : pods) {
        assertThat(pod.getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true));
        assertThat(pod.getMetadata().getAnnotations().entrySet().containsAll(podAnnos.entrySet()), is(true));
        assertThat(pod.getMetadata().getAnnotations().get("special"), is("annotation"));

        assertThat(pod.getSpec().getPriorityClassName(), is("top-priority"));
        assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler"));
        assertThat(pod.getSpec().getHostAliases(), containsInAnyOrder(hostAlias1, hostAlias2));
        assertThat(pod.getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2));
        assertThat(pod.getSpec().getEnableServiceLinks(), is(false));
        assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(123L));
        assertThat(pod.getSpec().getVolumes().stream()
                .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
                .findFirst().orElse(null)
                .getEmptyDir().getSizeLimit(), is(new Quantity("10Mi")));
        assertThat(pod.getSpec().getImagePullSecrets().size(), is(2));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
        assertThat(pod.getSpec().getSecurityContext(), is(notNullValue()));
        assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(123L));
        assertThat(pod.getSpec().getSecurityContext().getRunAsGroup(), is(456L));
        assertThat(pod.getSpec().getSecurityContext().getRunAsUser(), is(789L));
        assertThat(pod.getSpec().getAffinity(), is(affinity));
        assertThat(pod.getSpec().getTolerations(), is(toleration));
        assertThat("Failed to correctly set container environment variable: " + testEnvOneKey,
                pod.getSpec().getContainers().get(0).getEnv().stream()
                        .filter(env -> testEnvOneKey.equals(env.getName()))
                        .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvOneValue), is(true));
        assertThat("Failed to correctly set container environment variable: " + testEnvTwoKey,
                pod.getSpec().getContainers().get(0).getEnv().stream()
                        .filter(env -> testEnvTwoKey.equals(env.getName()))
                        .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvTwoValue), is(true));
        assertThat(pod.getSpec().getContainers(),
                hasItem(allOf(hasProperty("name", equalTo(KafkaCluster.KAFKA_NAME)), hasProperty("securityContext", equalTo(securityContext)))));

        assertThat(pod.getSpec().getContainers().size(), is(1));
        assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(livenessProbe.getTimeoutSeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(livenessProbe.getInitialDelaySeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getFailureThreshold(), is(livenessProbe.getFailureThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getSuccessThreshold(), is(livenessProbe.getSuccessThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getPeriodSeconds(), is(livenessProbe.getPeriodSeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(readinessProbe.getTimeoutSeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(readinessProbe.getInitialDelaySeconds()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getFailureThreshold(), is(readinessProbe.getFailureThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getSuccessThreshold(), is(readinessProbe.getSuccessThreshold()));
        assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getPeriodSeconds(), is(readinessProbe.getPeriodSeconds()));
        assertThat(AbstractModel.containerEnvVars(pod.getSpec().getContainers().get(0)).get(AbstractModel.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is("true"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is("data-0"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is("/var/lib/kafka/data-0"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getName(), is("kafka-metrics-and-logging"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getMountPath(), is("/opt/kafka/custom-config/"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getName(), is("ready-files"));
        assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getMountPath(), is("/var/opt/kafka"));
    }
}
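A note on what the template mechanism under test boils down to: user-supplied labels and annotations from spec.kafka.template are merged with the operator-generated defaults, which is why the assertions check containsAll rather than strict equality. A minimal sketch of such a merge, assuming operator-managed keys win on conflict (the helper name is illustrative, not the operator's actual API):

import java.util.HashMap;
import java.util.Map;

public class TemplateMergeSketch {
    /**
     * Merges user-supplied template labels/annotations with generated defaults.
     * Operator-managed keys are assumed to take precedence on conflict, which is
     * consistent with the test expecting both sets to be present on the result.
     */
    static Map<String, String> merge(Map<String, String> generated, Map<String, String> fromTemplate) {
        Map<String, String> merged = new HashMap<>(fromTemplate);
        merged.putAll(generated); // operator-managed keys overwrite template keys
        return merged;
    }
}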
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class VersionChangeCreator, method getVersionFromController.
/**
 * Collects information about whether the controller resource (StatefulSet or PodSet) exists and which Kafka
 * version it carries in its annotations.
 *
 * @return Future which completes when the version is collected from the controller resource
 */
private Future<Void> getVersionFromController() {
    Future<StatefulSet> stsFuture = stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()));
    Future<StrimziPodSet> podSetFuture = strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()));

    return CompositeFuture.join(stsFuture, podSetFuture).compose(res -> {
        StatefulSet sts = res.resultAt(0);
        StrimziPodSet podSet = res.resultAt(1);

        if (sts != null && podSet != null) {
            // Both the StatefulSet and the PodSet exist => the feature gate decides which one we take the version from
            if (featureGates.useStrimziPodSetsEnabled()) {
                versionFromControllerResource = Annotations.annotations(podSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            } else {
                versionFromControllerResource = Annotations.annotations(sts).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            }

            freshDeployment = false;
        } else if (sts != null) {
            // The StatefulSet exists, the PodSet does not => we take the version from the StatefulSet
            versionFromControllerResource = Annotations.annotations(sts).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            freshDeployment = false;
        } else if (podSet != null) {
            // The PodSet exists, the StatefulSet does not => we take the version from the PodSet
            versionFromControllerResource = Annotations.annotations(podSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION);
            freshDeployment = false;
        }

        return Future.succeededFuture();
    });
}
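One subtlety here is the choice of CompositeFuture.join over CompositeFuture.all: join waits for every future to complete, successfully or not, before the composite itself completes, so both lookups always finish before the results are inspected. A minimal standalone Vert.x sketch of the resultAt access pattern, with nulls standing in for missing resources:

import io.vertx.core.CompositeFuture;
import io.vertx.core.Future;

public class JoinSketch {
    public static void main(String[] args) {
        // join() completes only after every future has completed; resultAt()
        // then yields each individual result (null models a missing resource).
        Future<String> sts = Future.succeededFuture(null);
        Future<String> podSet = Future.succeededFuture("3.2.0");

        CompositeFuture.join(sts, podSet).onSuccess(res -> {
            String stsResult = res.resultAt(0);
            String podSetResult = res.resultAt(1);
            System.out.println("sts=" + stsResult + ", podSet=" + podSetResult);
        });
    }
}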
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class StrimziPodSetCrdOperatorIT, method testUpdateStatusAfterResourceDeletedThrowsKubernetesClientException.
/**
 * Tests what happens when the resource is deleted while its status is being updated
 *
 * @param context Test context
 */
@Test
public void testUpdateStatusAfterResourceDeletedThrowsKubernetesClientException(VertxTestContext context) {
    String resourceName = getResourceName(RESOURCE_NAME);
    Checkpoint async = context.checkpoint();
    String namespace = getNamespace();
    StrimziPodSetOperator op = operator();

    // Required to be able to create the resource
    readinessHelper(op, namespace, resourceName);

    AtomicReference<StrimziPodSet> newStatus = new AtomicReference<>();

    LOGGER.info("Creating resource");
    op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName))
            .onComplete(context.succeedingThenComplete())
            .compose(rr -> {
                LOGGER.info("Saving resource with status change prior to deletion");
                newStatus.set(getResourceWithNewReadyStatus(op.get(namespace, resourceName)));

                LOGGER.info("Deleting resource");
                return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null);
            })
            .onComplete(context.succeedingThenComplete())
            .compose(i -> {
                LOGGER.info("Wait for confirmed deletion");
                return op.waitFor(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, 100L, 10_000L, (n, ns) -> operator().get(namespace, resourceName) == null);
            })
            .compose(i -> {
                LOGGER.info("Updating resource with new status - should fail");
                return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus.get());
            })
            .onComplete(context.failing(e -> context.verify(() -> {
                assertThat(e, instanceOf(KubernetesClientException.class));
                async.flag();
            })));
}
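The assertion pins down the exception type but not the HTTP status. If one also wanted to check that the failure really is the API server rejecting a status update on a resource that no longer exists, a hedged variant of the final handler might look like the fragment below; getCode() is fabric8's accessor for the HTTP status code, and the 404 expectation is an assumption about the server's response, not something the original test asserts.

.onComplete(context.failing(e -> context.verify(() -> {
    assertThat(e, instanceOf(KubernetesClientException.class));
    // Assumed: the status update on a deleted resource surfaces as HTTP 404
    assertThat(((KubernetesClientException) e).getCode(), is(404));
    async.flag();
})));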
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class KafkaPodSetTest, method testImagePullSecretsFromBoth.
@ParallelTest
public void testImagePullSecretsFromBoth() {
    // CR configuration has priority -> CO configuration is ignored if both are set
    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");

    Kafka kafka = new KafkaBuilder(KAFKA)
            .editSpec()
                .editKafka()
                    .withNewTemplate()
                        .withNewPod()
                            .withImagePullSecrets(secret2)
                        .endPod()
                    .endTemplate()
                .endKafka()
            .endSpec()
            .build();

    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    StrimziPodSet ps = kc.generatePodSet(3, true, null, List.of(secret1), brokerId -> new HashMap<>());

    // We need to loop through the pods to make sure they have the right values
    List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
    for (Pod pod : pods) {
        assertThat(pod.getSpec().getImagePullSecrets().size(), is(1));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(false));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
    }
}
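The precedence rule this test pins down is replace, not merge: when the CR template specifies imagePullSecrets, the operator-level default passed to generatePodSet is ignored entirely. A minimal sketch of that rule, with an illustrative helper name:

import io.fabric8.kubernetes.api.model.LocalObjectReference;
import java.util.List;

public class PullSecretsSketch {
    /**
     * CR template secrets win outright; the operator-wide defaults are used
     * only when the template specifies none. No merging takes place.
     */
    static List<LocalObjectReference> resolve(List<LocalObjectReference> fromTemplate,
                                              List<LocalObjectReference> operatorDefaults) {
        return (fromTemplate != null && !fromTemplate.isEmpty()) ? fromTemplate : operatorDefaults;
    }
}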