Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class ZooKeeperReconciler, method podSet.
/**
* Create the StrimziPodSet for the ZooKeeper cluster with a specific number of pods. This is used directly
* during scale-ups or scale-downs.
*
* @param replicas Number of replicas which the PodSet should use
*
 * @return Future which completes when the PodSet is created or updated (or deleted when StrimziPodSets are disabled)
*/
private Future<Void> podSet(int replicas) {
    if (featureGates.useStrimziPodSetsEnabled()) {
        Map<String, String> podAnnotations = new LinkedHashMap<>(2);
        podAnnotations.put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(ModelUtils.caCertGeneration(this.clusterCa)));
        podAnnotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, loggingHash);

        StrimziPodSet zkPodSet = zk.generatePodSet(replicas, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, podAnnotations);

        return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), zkPodSet)
                .compose(rr -> {
                    podSetDiff = rr;
                    return Future.succeededFuture();
                });
    } else {
        return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()))
                .compose(podSet -> {
                    if (podSet != null) {
                        return strimziPodSetOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), false);
                    } else {
                        return Future.succeededFuture();
                    }
                });
    }
}
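As an illustration of the scale-up path mentioned in the Javadoc, the sketch below chains podSet() calls to grow the PodSet one pod at a time. This is only a sketch: the method name scaleUpOneByOne and the replica bookkeeping are hypothetical and not taken from the Strimzi source; only podSet() and the Vert.x Future API come from the snippet above.

// Hypothetical sketch: grow the ZooKeeper PodSet one pod at a time by chaining podSet() calls
private Future<Void> scaleUpOneByOne(int currentReplicas, int desiredReplicas) {
    if (currentReplicas >= desiredReplicas) {
        return Future.succeededFuture();
    }
    // Add a single pod, wait for the PodSet reconciliation to complete, then continue with the next pod
    return podSet(currentReplicas + 1)
            .compose(ignored -> scaleUpOneByOne(currentReplicas + 1, desiredReplicas));
}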
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaPodSetTest, method testImagePullSecretsFromCO.
@ParallelTest
public void testImagePullSecretsFromCO() {
    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
    List<LocalObjectReference> secrets = new ArrayList<>(2);
    secrets.add(secret1);
    secrets.add(secret2);

    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
    StrimziPodSet ps = kc.generatePodSet(3, true, null, secrets, brokerId -> new HashMap<>());

    // We need to loop through the pods to make sure they have the right values
    List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
    for (Pod pod : pods) {
        assertThat(pod.getSpec().getImagePullSecrets().size(), is(2));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
    }
}
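The secrets list above stands in for image pull secrets configured on the Cluster Operator (CO) side. As a rough, self-contained sketch of how such operator-level configuration could be turned into the List<LocalObjectReference> passed to generatePodSet(), assuming a comma-separated list of Secret names (for example from an environment variable such as STRIMZI_IMAGE_PULL_SECRETS; the helper name and variable are assumptions, not quoted from the operator code):

// Hypothetical helper: parse a comma-separated list of Secret names into pull-secret references.
// Requires java.util.Arrays, java.util.List, java.util.stream.Collectors and the fabric8 LocalObjectReference used above.
static List<LocalObjectReference> parsePullSecrets(String commaSeparatedNames) {
    if (commaSeparatedNames == null || commaSeparatedNames.isBlank()) {
        return List.of();
    }
    return Arrays.stream(commaSeparatedNames.split(","))
            .map(String::trim)
            .filter(name -> !name.isEmpty())
            .map(LocalObjectReference::new)
            .collect(Collectors.toList());
}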
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaPodSetTest, method testImagePullSecretsFromBoth.
@ParallelTest
public void testImagePullSecretsFromBoth() {
    // CR configuration has priority -> CO configuration is ignored if both are set
    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");

    Kafka kafka = new KafkaBuilder(KAFKA)
            .editSpec()
                .editKafka()
                    .withNewTemplate()
                        .withNewPod()
                            .withImagePullSecrets(secret2)
                        .endPod()
                    .endTemplate()
                .endKafka()
            .endSpec()
            .build();

    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    StrimziPodSet ps = kc.generatePodSet(3, true, null, List.of(secret1), brokerId -> new HashMap<>());

    // We need to loop through the pods to make sure they have the right values
    List<Pod> pods = PodSetUtils.mapsToPods(ps.getSpec().getPods());
    for (Pod pod : pods) {
        assertThat(pod.getSpec().getImagePullSecrets().size(), is(1));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(false));
        assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true));
    }
}
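Taken together with the previous test, this pins down the precedence rule: pull secrets from the Kafka CR pod template win, and the operator-level list is ignored when both are set. A minimal sketch of that rule (the method and parameter names are illustrative, not the actual KafkaCluster implementation):

// Illustrative only: template-level (CR) secrets take priority over operator-level (CO) secrets
static List<LocalObjectReference> effectivePullSecrets(List<LocalObjectReference> templateSecrets, List<LocalObjectReference> operatorSecrets) {
    return (templateSecrets != null && !templateSecrets.isEmpty()) ? templateSecrets : operatorSecrets;
}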
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaAssemblyOperatorManualRollingUpdatesTest, method manualRollingUpdate.
public void manualRollingUpdate(VertxTestContext context, boolean useStrimziPodSets) {
    Kafka kafka = new KafkaBuilder()
            .withNewMetadata()
                .withName(CLUSTER_NAME)
                .withNamespace(NAMESPACE)
                .withGeneration(2L)
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withListeners(new GenericKafkaListenerBuilder()
                            .withName("plain")
                            .withPort(9092)
                            .withType(KafkaListenerType.INTERNAL)
                            .withTls(false)
                            .build())
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                .endKafka()
                .withNewZookeeper()
                    .withReplicas(3)
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                .endZookeeper()
            .endSpec()
            .build();

    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    if (useStrimziPodSets) {
        StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
        when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenAnswer(i -> {
            StrimziPodSet zkPodSet = zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, null);
            zkPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true");
            return Future.succeededFuture(zkPodSet);
        });
        when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenAnswer(i -> {
            StrimziPodSet kafkaPodSet = kafkaCluster.generatePodSet(kafka.getSpec().getKafka().getReplicas(), false, null, null, brokerId -> null);
            kafkaPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true");
            return Future.succeededFuture(kafkaPodSet);
        });

        StatefulSetOperator mockStsOps = supplier.stsOperations;
        when(mockStsOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
    } else {
        StatefulSetOperator mockStsOps = supplier.stsOperations;
        when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenAnswer(i -> {
            StatefulSet sts = zkCluster.generateStatefulSet(false, null, null);
            sts.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true");
            return Future.succeededFuture(sts);
        });
        when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenAnswer(i -> {
            StatefulSet sts = kafkaCluster.generateStatefulSet(false, null, null, null);
            sts.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true");
            return Future.succeededFuture(sts);
        });

        StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
        when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
    }

    PodOperator mockPodOps = supplier.podOperations;
    when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));

    CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka));
    when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(kafka);
    when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());

    ClusterOperatorConfig config;
    if (useStrimziPodSets) {
        config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
    } else {
        config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS);
    }

    MockZooKeeperReconciler zr = new MockZooKeeperReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kafka, VERSION_CHANGE, null, 0, null);
    MockKafkaReconciler kr = new MockKafkaReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kafka, VERSION_CHANGE, null, 0, null, null);

    MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), CERT_MANAGER, PASSWORD_GENERATOR, supplier, config, zr, kr);

    Checkpoint async = context.checkpoint();
    kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
        // Verify Zookeeper rolling updates
        assertThat(zr.maybeRollZooKeeperInvocations, is(1));
        assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(Collections.singletonList("manual rolling update")));
        assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(Collections.singletonList("manual rolling update")));
        assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-2")), is(Collections.singletonList("manual rolling update")));

        // Verify Kafka rolling updates
        assertThat(kr.maybeRollKafkaInvocations, is(1));
        assertThat(kr.kafkaPodNeedsRestart.apply(podWithName("my-cluster-kafka-0")), is(Collections.singletonList("manual rolling update")));
        assertThat(kr.kafkaPodNeedsRestart.apply(podWithName("my-cluster-kafka-1")), is(Collections.singletonList("manual rolling update")));
        assertThat(kr.kafkaPodNeedsRestart.apply(podWithName("my-cluster-kafka-2")), is(Collections.singletonList("manual rolling update")));

        async.flag();
    })));
}
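The mocked getAsync() calls above return resources that already carry the strimzi.io/manual-rolling-update annotation, which is the state a user creates when requesting a manual roll. A minimal sketch of building that state, using only calls already shown in this test (the kubectl command in the comment is the rough user-facing equivalent and is given as an assumption, not quoted from Strimzi documentation):

// Build a ZooKeeper PodSet and mark it for a manual rolling update, i.e. what the operator
// sees after a user has annotated the resource. Roughly equivalent to:
//   kubectl annotate strimzipodset my-cluster-zookeeper strimzi.io/manual-rolling-update=true
StrimziPodSet annotatedZkPodSet = zkCluster.generatePodSet(3, false, null, null, null);
annotatedZkPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true");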
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaAssemblyOperatorPodSetTest, method testScaleUp.
/**
 * Tests reconciliation with scale-up of the ZooKeeper and Kafka clusters from 1 to 3 pods
*
* @param context Test context
*/
@Test
public void testScaleUp(VertxTestContext context) {
    Kafka oldKafka = new KafkaBuilder(KAFKA)
            .editSpec()
                .editZookeeper()
                    .withReplicas(1)
                .endZookeeper()
                .editKafka()
                    .withReplicas(1)
                .endKafka()
            .endSpec()
            .build();

    ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, null);
    KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSet(oldKafka.getSpec().getKafka().getReplicas(), false, null, null, brokerId -> null);

    ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    SecretOperator secretOps = supplier.secretOperations;
    when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
    when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret()));

    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS, true)));
    ArgumentCaptor<String> cmReconciliationCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
    // ZooKeeper PodSet
    when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(oldZkPodSet));
    ArgumentCaptor<StrimziPodSet> zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class);
    when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));
    // Kafka PodSet
    when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(oldKafkaPodSet));
    ArgumentCaptor<StrimziPodSet> kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class);
    when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));
    StatefulSetOperator mockStsOps = supplier.stsOperations;
    // Zoo STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(null));
    // Kafka STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(null));

    PodOperator mockPodOps = supplier.podOperations;
    when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());

    CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA));
    when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA);
    when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());

    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");

    MockZooKeeperReconciler zr = new MockZooKeeperReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 1, CLUSTER_CA);
    MockKafkaReconciler kr = new MockKafkaReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 1, CLUSTER_CA, CLIENTS_CA);

    MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), CERT_MANAGER, PASSWORD_GENERATOR, supplier, config, zr, kr);

    Checkpoint async = context.checkpoint();
    kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
        // Scale-up of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods.
        assertThat(zkPodSetCaptor.getAllValues().size(), is(3));
        // => first capture is from zkPodSet() with old replica count
        assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(1));
        // => second capture is from zkScalingUp() with new replica count
        assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(2));
        // => third capture is from zkScalingUp() with new replica count
        assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3));

        // Still one maybe-roll invocation
        assertThat(zr.maybeRollZooKeeperInvocations, is(1));

        // Scale-up of Kafka is done in one go => we should see two invocations (first from regular patching and second from scale-up)
        assertThat(kafkaPodSetCaptor.getAllValues().size(), is(2));
        // => first capture is from kafkaPodSet() with old replica count
        assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(1));
        // => second capture is from kafkaScaleUp() with new replica count
        assertThat(kafkaPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(3));

        // Still one maybe-roll invocation
        assertThat(kr.maybeRollKafkaInvocations, is(1));

        // CMs for all pods are reconciled
        assertThat(cmReconciliationCaptor.getAllValues().size(), is(3));
        assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2")));

        // Only the shared CM is deleted
        assertThat(cmDeletionCaptor.getAllValues().size(), is(1));
        assertThat(cmDeletionCaptor.getAllValues().get(0), is("my-cluster-kafka-config"));

        async.flag();
    })));
}
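The ConfigMap assertions at the end reflect the per-broker configuration model used with PodSets: one ConfigMap per broker is reconciled and the old shared ConfigMap is deleted. An illustrative reconstruction of the names the captors assert (only the string patterns from the assertions above are used; nothing further is implied about the operator's internals):

// Illustrative only: the ConfigMap names asserted by cmReconciliationCaptor and cmDeletionCaptor
// (requires java.util.stream.IntStream and java.util.stream.Collectors)
String cluster = "my-cluster";
List<String> perBrokerConfigMaps = IntStream.range(0, 3)
        .mapToObj(brokerId -> cluster + "-kafka-" + brokerId)
        .collect(Collectors.toList()); // [my-cluster-kafka-0, my-cluster-kafka-1, my-cluster-kafka-2]
String sharedConfigMap = cluster + "-kafka-config"; // the single ConfigMap expected to be deleted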