Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class StrimziPodSetController, method reconcile.
/**
 * The main reconciliation logic which handles the reconciliations.
 *
 * @param reconciliation Reconciliation identifier used for logging
 */
private void reconcile(Reconciliation reconciliation) {
    String name = reconciliation.name();
    String namespace = reconciliation.namespace();
    StrimziPodSet podSet = strimziPodSetLister.namespace(namespace).get(name);

    if (podSet == null) {
        LOGGER.debugCr(reconciliation, "StrimziPodSet is null => nothing to do");
    } else if (!matchesCrSelector(podSet)) {
        LOGGER.debugCr(reconciliation, "StrimziPodSet doesn't match the selector => nothing to do");
    } else if (isDeleting(podSet)) {
        // When the PodSet is deleted, the pod deletion is done by Kubernetes Garbage Collection. When the PodSet
        // deletion is non-cascading, Kubernetes will remove the owner references. In order to avoid setting the
        // owner reference again, we need to check if the PodSet is being deleted and if it is, we leave it to
        // Kubernetes.
        LOGGER.infoCr(reconciliation, "StrimziPodSet is deleting => nothing to do");
    } else {
        LOGGER.infoCr(reconciliation, "StrimziPodSet will be reconciled");

        StrimziPodSetStatus status = new StrimziPodSetStatus();
        status.setObservedGeneration(podSet.getMetadata().getGeneration());

        try {
            // This has to:
            // 1) Create missing pods
            // 2) Modify changed pods if needed (patch owner reference)
            // 3) Delete scaled down pods

            // Will be used later to find out if any pod needs to be deleted
            Set<String> desiredPods = new HashSet<>(podSet.getSpec().getPods().size());
            PodCounter podCounter = new PodCounter();
            podCounter.pods = podSet.getSpec().getPods().size();

            for (Map<String, Object> desiredPod : podSet.getSpec().getPods()) {
                Pod pod = PodSetUtils.mapToPod(desiredPod);
                desiredPods.add(pod.getMetadata().getName());

                maybeCreateOrPatchPod(reconciliation, pod, ModelUtils.createOwnerReference(podSet), podCounter);
            }

            // Check if any pods need to be deleted
            removeDeletedPods(reconciliation, podSet.getSpec().getSelector(), desiredPods, podCounter);

            status.setPods(podCounter.pods);
            status.setReadyPods(podCounter.readyPods);
            status.setCurrentPods(podCounter.currentPods);
        } catch (Exception e) {
            LOGGER.errorCr(reconciliation, "StrimziPodSet {} in namespace {} reconciliation failed", reconciliation.name(), reconciliation.namespace(), e);
            status.addCondition(StatusUtils.buildConditionFromException("Error", "true", e));
        } finally {
            maybeUpdateStatus(reconciliation, podSet, status);
            LOGGER.infoCr(reconciliation, "reconciled");
        }
    }
}
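
The PodCounter used above is not included in this snippet; it is presumably a small mutable holder that maybeCreateOrPatchPod and removeDeletedPods update and whose fields feed the status setters. A minimal sketch of such a holder, under that assumption (only the field names are taken from the code above, everything else is guessed):

// Assumed sketch only - the real PodCounter is an implementation detail of StrimziPodSetController.
static class PodCounter {
    int pods;        // number of pods the PodSet should own
    int readyPods;   // pods that are running and ready
    int currentPods; // pods that already match the desired definition
}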
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class StrimziPodSetController, method maybeUpdateStatus.
/**
 * Updates the status of the StrimziPodSet. The status will be updated only when it has changed since the last update.
 *
 * @param reconciliation Reconciliation in which this is executed
 * @param podSet Original pod set with the current status
 * @param desiredStatus The desired status which should be set if it differs
 */
private void maybeUpdateStatus(Reconciliation reconciliation, StrimziPodSet podSet, StrimziPodSetStatus desiredStatus) {
    if (!new StatusDiff(podSet.getStatus(), desiredStatus).isEmpty()) {
        try {
            LOGGER.debugCr(reconciliation, "Updating status of StrimziPodSet {} in namespace {}", reconciliation.name(), reconciliation.namespace());
            StrimziPodSet latestPodSet = strimziPodSetLister.namespace(reconciliation.namespace()).get(reconciliation.name());

            if (latestPodSet != null) {
                StrimziPodSet updatedPodSet = new StrimziPodSetBuilder(latestPodSet).withStatus(desiredStatus).build();
                strimziPodSetOperator.client().inNamespace(reconciliation.namespace()).withName(reconciliation.name()).patchStatus(updatedPodSet);
            }
        } catch (KubernetesClientException e) {
            if (e.getCode() == 409) {
                LOGGER.debugCr(reconciliation, "StrimziPodSet {} in namespace {} changed while trying to update status", reconciliation.name(), reconciliation.namespace());
            } else if (e.getCode() == 404) {
                LOGGER.debugCr(reconciliation, "StrimziPodSet {} in namespace {} was deleted while trying to update status", reconciliation.name(), reconciliation.namespace());
            } else {
                LOGGER.errorCr(reconciliation, "Failed to update status of StrimziPodSet {} in namespace {}", reconciliation.name(), reconciliation.namespace(), e);
            }
        }
    }
}
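
Two details carry the logic here: the patch is skipped when the StatusDiff is empty, and a 409 (conflict) or 404 (not found) from the API server is only logged, because the next reconciliation will recompute and retry the status anyway. A minimal sketch of the "only patch when something changed" guard, assuming the status type implements a meaningful equals() (the real StatusDiff compares the serialized statuses rather than relying on equals()):

// Simplified stand-in for the StatusDiff check above - an assumption, not Strimzi code.
private static boolean statusNeedsUpdate(StrimziPodSetStatus currentStatus, StrimziPodSetStatus desiredStatus) {
    // Patch only when there is no status yet or the desired status differs from the current one
    return currentStatus == null || !currentStatus.equals(desiredStatus);
}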
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class ZooKeeperReconciler, method podSet.
/**
 * Create the StrimziPodSet for the ZooKeeper cluster with a specific number of pods. This is used directly
 * during scale-ups or scale-downs.
 *
 * @param replicas Number of replicas which the PodSet should use
 *
 * @return Future which completes when the PodSet is created or updated
 */
private Future<Void> podSet(int replicas) {
    if (featureGates.useStrimziPodSetsEnabled()) {
        Map<String, String> podAnnotations = new LinkedHashMap<>(2);
        podAnnotations.put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(ModelUtils.caCertGeneration(this.clusterCa)));
        podAnnotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, loggingHash);

        StrimziPodSet zkPodSet = zk.generatePodSet(replicas, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, podAnnotations);
        return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), zkPodSet)
                .compose(rr -> {
                    podSetDiff = rr;
                    return Future.succeededFuture();
                });
    } else {
        return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()))
                .compose(podSet -> {
                    if (podSet != null) {
                        return strimziPodSetOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), false);
                    } else {
                        return Future.succeededFuture();
                    }
                });
    }
}
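
Because podSet(int replicas) takes the target pod count as a parameter, scaling logic can drive it step by step and chain the steps with Vert.x futures. The following is only an illustrative sketch (the method name zkScaleDownOneByOne is hypothetical, not Strimzi's), showing how a pod-by-pod scale-down could be built on top of podSet():

// Hypothetical illustration - not the actual ZooKeeperReconciler code.
// Removes one ZooKeeper pod per step by re-creating the PodSet with one pod fewer.
private Future<Void> zkScaleDownOneByOne(int currentReplicas, int desiredReplicas) {
    if (currentReplicas > desiredReplicas) {
        return podSet(currentReplicas - 1)
                .compose(ignored -> zkScaleDownOneByOne(currentReplicas - 1, desiredReplicas));
    } else {
        return Future.succeededFuture();
    }
}

This mirrors the behaviour the scale-down test further below asserts: the PodSet is reconciled repeatedly, each time with one pod fewer, until the desired replica count is reached.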
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class KafkaAssemblyOperatorPodSetTest, method testReconciliationWithRoll.
/**
 * Tests the regular reconciliation of the Kafka cluster which results in some rolling updates
 *
 * @param context Test context
 */
@Test
public void testReconciliationWithRoll(VertxTestContext context) {
    Kafka oldKafka = new KafkaBuilder(KAFKA).editSpec().editZookeeper().withImage("old-image:latest").endZookeeper().editKafka().withImage("old-image:latest").endKafka().endSpec().build();
    ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, null);
    KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSet(KAFKA.getSpec().getKafka().getReplicas(), false, null, null, brokerId -> null);
    ZookeeperCluster newZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
    KafkaCluster newKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    SecretOperator secretOps = supplier.secretOperations;
    when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());

    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS, true)));
    when(mockCmOps.reconcile(any(), any(), startsWith("my-cluster-kafka-"), any())).thenReturn(Future.succeededFuture());
    when(mockCmOps.deleteAsync(any(), any(), eq("my-cluster-kafka-config"), anyBoolean())).thenReturn(Future.succeededFuture());

    StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
    when(mockPodSetOps.getAsync(any(), eq(newZkCluster.getName()))).thenReturn(Future.succeededFuture(oldZkPodSet));
    when(mockPodSetOps.reconcile(any(), any(), eq(newZkCluster.getName()), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));
    when(mockPodSetOps.getAsync(any(), eq(newKafkaCluster.getName()))).thenReturn(Future.succeededFuture(oldKafkaPodSet));
    when(mockPodSetOps.reconcile(any(), any(), eq(newKafkaCluster.getName()), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));

    StatefulSetOperator mockStsOps = supplier.stsOperations;
    // Zoo STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(newZkCluster.getName()))).thenReturn(Future.succeededFuture(null));
    // Kafka STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(newKafkaCluster.getName()))).thenReturn(Future.succeededFuture(null));

    PodOperator mockPodOps = supplier.podOperations;
    when(mockPodOps.listAsync(any(), eq(newZkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), eq(newKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList()));

    CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA));
    when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA);
    when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());

    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
    MockZooKeeperReconciler zr = new MockZooKeeperReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 0, CLUSTER_CA);
    MockKafkaReconciler kr = new MockKafkaReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 0, CLUSTER_CA, CLIENTS_CA);
    MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), CERT_MANAGER, PASSWORD_GENERATOR, supplier, config, zr, kr);

    Checkpoint async = context.checkpoint();
    kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                assertThat(zr.maybeRollZooKeeperInvocations, is(1));
                assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-0")), is(List.of("Pod has old revision")));
                assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-1")), is(List.of("Pod has old revision")));
                assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-2")), is(List.of("Pod has old revision")));

                assertThat(kr.maybeRollKafkaInvocations, is(1));
                assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSet, "my-cluster-kafka-0")), is(List.of("Pod has old revision")));
                assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSet, "my-cluster-kafka-1")), is(List.of("Pod has old revision")));
                assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSet, "my-cluster-kafka-2")), is(List.of("Pod has old revision")));

                async.flag();
            })));
}
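
The assertions use a podFromPodSet(...) helper that is not part of this excerpt. A plausible sketch of such a helper, assuming it only decodes the pod maps stored in the PodSet spec and picks the pod with the requested name:

// Assumed helper - not shown in the excerpt above.
private static Pod podFromPodSet(StrimziPodSet podSet, String name) {
    return podSet.getSpec().getPods().stream()
            .map(PodSetUtils::mapToPod) // decode the stored Map<String, Object> back into a Pod
            .filter(pod -> name.equals(pod.getMetadata().getName()))
            .findFirst()
            .orElse(null);
}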
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class KafkaAssemblyOperatorPodSetTest, method testScaleDown.
/**
 * Tests reconciliation with scale-down from 5 to 3 ZooKeeper pods
 *
 * @param context Test context
 */
@Test
public void testScaleDown(VertxTestContext context) {
    Kafka oldKafka = new KafkaBuilder(KAFKA).editSpec().editZookeeper().withReplicas(5).endZookeeper().editKafka().withReplicas(5).endKafka().endSpec().build();
    ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, null);
    KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS);
    StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSet(oldKafka.getSpec().getKafka().getReplicas(), false, null, null, brokerId -> null);
    ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS);

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    SecretOperator secretOps = supplier.secretOperations;
    when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
    when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret()));

    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS, true)));
    ArgumentCaptor<String> cmReconciliationCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());

    StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
    // Zoo
    when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(oldZkPodSet));
    ArgumentCaptor<StrimziPodSet> zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class);
    when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));
    // Kafka
    when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(oldKafkaPodSet));
    ArgumentCaptor<StrimziPodSet> kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class);
    when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3))));

    StatefulSetOperator mockStsOps = supplier.stsOperations;
    // Zoo STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(null));
    // Kafka STS is queried and deleted if it still exists
    when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(null));

    PodOperator mockPodOps = supplier.podOperations;
    when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList()));
    when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture());

    CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA));
    when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA);
    when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());

    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
    MockZooKeeperReconciler zr = new MockZooKeeperReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 5, CLUSTER_CA);
    MockKafkaReconciler kr = new MockKafkaReconciler(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), KAFKA, VERSION_CHANGE, null, 5, CLUSTER_CA, CLIENTS_CA);
    MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), CERT_MANAGER, PASSWORD_GENERATOR, supplier, config, zr, kr);

    Checkpoint async = context.checkpoint();
    kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // Scale-down of Zoo is done pod by pod => the reconcile method is called 3 times with 5, 4 and 3 pods
                assertThat(zkPodSetCaptor.getAllValues().size(), is(3));
                // => first capture is from zkPodSet() with the old replica count
                assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(5));
                // => second capture is from zkScalingDown() after the first pod is removed
                assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(4));
                // => third capture is from zkScalingDown() with the new replica count
                assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3));
                // Still one maybe-roll invocation
                assertThat(zr.maybeRollZooKeeperInvocations, is(1));

                // Scale-down of Kafka is done in one go => we should see two invocations (first from scale-down and second from regular patching)
                assertThat(kafkaPodSetCaptor.getAllValues().size(), is(2));
                // => first capture is from kafkaScaleDown() with the new replica count
                assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(3));
                // => second capture is from kafkaPodSet() with the new replica count
                assertThat(kafkaPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(3));
                // Still one maybe-roll invocation
                assertThat(kr.maybeRollKafkaInvocations, is(1));

                // CMs for all remaining pods are reconciled
                assertThat(cmReconciliationCaptor.getAllValues().size(), is(3));
                assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2")));

                // The CMs for the scaled-down pods and the old shared CM are deleted
                assertThat(cmDeletionCaptor.getAllValues().size(), is(3));
                assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-kafka-3", "my-cluster-kafka-4", "my-cluster-kafka-config")));

                async.flag();
            })));
}
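
The ConfigMap assertions at the end encode the scale-down bookkeeping: the per-broker ConfigMaps of the three remaining brokers are reconciled, while the ConfigMaps of the two removed brokers plus the old shared my-cluster-kafka-config ConfigMap are deleted. A small standalone sketch (illustrative only, not Strimzi code) of that bookkeeping:

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ScaleDownConfigMaps {
    public static void main(String[] args) {
        int oldReplicas = 5;
        int newReplicas = 3;

        // Per-broker ConfigMaps that stay and are therefore reconciled
        List<String> reconciled = IntStream.range(0, newReplicas)
                .mapToObj(i -> "my-cluster-kafka-" + i)
                .collect(Collectors.toList());

        // ConfigMaps of the scaled-down brokers plus the old shared ConfigMap are deleted
        List<String> deleted = new ArrayList<>();
        IntStream.range(newReplicas, oldReplicas)
                .mapToObj(i -> "my-cluster-kafka-" + i)
                .forEach(deleted::add);
        deleted.add("my-cluster-kafka-config");

        System.out.println(reconciled); // [my-cluster-kafka-0, my-cluster-kafka-1, my-cluster-kafka-2]
        System.out.println(deleted);    // [my-cluster-kafka-3, my-cluster-kafka-4, my-cluster-kafka-config]
    }
}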