Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi:
class StrimziPodSetControllerMockTest, method testScaleUpScaleDown.
/**
* Tests scaling up and down of the StrimziPodSet and updates of the StrimziPodSet status.
*
* @param context Test context
*/
@Test
public void testScaleUpScaleDown(VertxTestContext context) {
    String podSetName = "scale-up-down";
    String pod1Name = podSetName + "-0";
    String pod2Name = podSetName + "-1";

    try {
        Pod pod1 = pod(pod1Name, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).create(podSet(podSetName, KAFKA_NAME, pod1));

        // Wait until the pod is ready
        TestUtils.waitFor("Wait for Pod to be ready", 100, 10_000,
                () -> client.pods().inNamespace(NAMESPACE).withName(pod1Name).isReady(),
                () -> context.failNow("Test timed out waiting for pod readiness!"));

        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));

        // Scale-up the pod-set
        Pod pod2 = pod(pod2Name, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).patch(podSet(podSetName, KAFKA_NAME, pod1, pod2));

        // Wait until the new pod is ready
        TestUtils.waitFor("Wait for second Pod to be ready", 100, 10_000,
                () -> client.pods().inNamespace(NAMESPACE).withName(pod2Name).isReady(),
                () -> context.failNow("Test timed out waiting for second pod readiness!"));

        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 2 && podSet.getStatus().getReadyPods() == 2 && podSet.getStatus().getPods() == 2;
        }, () -> context.failNow("Pod stats do not match"));

        // Scale-down the pod-set
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).patch(podSet(podSetName, KAFKA_NAME, pod1));

        // Wait until the second pod is deleted
        TestUtils.waitFor("Wait for second Pod to be deleted", 100, 10_000,
                () -> client.pods().inNamespace(NAMESPACE).withName(pod2Name).get() == null,
                () -> context.failNow("Test timed out waiting for second pod to be deleted!"));

        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));

        context.completeNow();
    } finally {
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
    }
}
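The test relies on the pod(...) and podSet(...) helpers and the podSetOp() accessor from StrimziPodSetControllerMockTest, which are not shown in this snippet. The following is a minimal sketch of what such helpers could look like, assuming fabric8's PodBuilder/ContainerBuilder/LabelSelectorBuilder, Strimzi's StrimziPodSetBuilder, the operator's Labels constants and a PodSetUtils.podToMap(...) conversion; the label keys, the dummy container and the selector are assumptions and may differ from the real test code.

// Sketch only: the real helpers may set different labels, containers and metadata.
private Pod pod(String name, String kafkaName, String podSetName) {
    return new PodBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(NAMESPACE)
                .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, kafkaName, Labels.STRIMZI_KIND_LABEL, "Kafka"))
            .endMetadata()
            .withNewSpec()
                .withContainers(new ContainerBuilder()
                        .withName("main")
                        .withImage("busybox")
                        .withCommand("sleep", "3600")
                        .build())
            .endSpec()
            .build();
}

private StrimziPodSet podSet(String name, String kafkaName, Pod... pods) {
    return new StrimziPodSetBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(NAMESPACE)
                .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, kafkaName, Labels.STRIMZI_KIND_LABEL, "Kafka"))
            .endMetadata()
            .withNewSpec()
                // Selector that matches the label set on the pods above (assumed)
                .withSelector(new LabelSelectorBuilder()
                        .withMatchLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, kafkaName))
                        .build())
                // StrimziPodSet stores its pods as a list of maps, hence the conversion
                .withPods(Arrays.stream(pods).map(PodSetUtils::podToMap).collect(Collectors.toList()))
            .endSpec()
            .build();
}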
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi:
class StrimziPodSetControllerMockTest, method testPodCreationDeletionAndRecreation.
/*
* Tests
*/
/**
* Tests the basic operations:
* - Creation of StrimziPodSet and the managed pod
* - Re-creation of the managed pod when it is deleted
* - Deletion of the StrimziPodSet and the managed pod
*
* @param context Test context
*/
@Test
public void testPodCreationDeletionAndRecreation(VertxTestContext context) {
    String podSetName = "basic-test";
    String podName = podSetName + "-0";

    try {
        Pod pod = pod(podName, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).create(podSet(podSetName, KAFKA_NAME, pod));

        // Check that the pod is created
        TestUtils.waitFor("Wait for Pod to be created", 100, 10_000,
                () -> client.pods().inNamespace(NAMESPACE).withName(podName).get() != null,
                () -> context.failNow("Test timed out waiting for pod creation!"));

        // Wait until the pod is ready
        TestUtils.waitFor("Wait for Pod to be ready", 100, 10_000,
                () -> client.pods().inNamespace(NAMESPACE).withName(podName).isReady(),
                () -> context.failNow("Test timed out waiting for pod readiness!"));

        Pod actualPod = client.pods().inNamespace(NAMESPACE).withName(podName).get();

        // Check that the OwnerReference was added
        checkOwnerReference(actualPod, podSetName);

        // We keep the resource version for the pod re-creation test
        String resourceVersion = actualPod.getMetadata().getResourceVersion();

        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));

        // Delete the pod and test that it is recreated
        client.pods().inNamespace(NAMESPACE).withName(podName).delete();

        // Check that the pod is recreated with a new resource version
        TestUtils.waitFor("Wait for Pod to be recreated", 100, 10_000, () -> {
            Pod p = client.pods().inNamespace(NAMESPACE).withName(podName).get();
            return p != null && !resourceVersion.equals(p.getMetadata().getResourceVersion());
        }, () -> context.failNow("Test timed out waiting for pod recreation!"));

        context.completeNow();
    } finally {
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
    }
}
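checkOwnerReference(...) is another helper from the test class that is not shown above. A hedged sketch of what it likely asserts, assuming Hamcrest's assertThat, hasSize and is are statically imported; the exact assertions are an assumption:

// Sketch only: verifies that the controller added an OwnerReference pointing at the StrimziPodSet.
private void checkOwnerReference(Pod pod, String podSetName) {
    assertThat(pod.getMetadata().getOwnerReferences(), hasSize(1));

    OwnerReference owner = pod.getMetadata().getOwnerReferences().get(0);
    assertThat(owner.getKind(), is("StrimziPodSet"));
    assertThat(owner.getName(), is(podSetName));
}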
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi:
class KafkaReconciler, method podSet.
/**
 * Creates or updates the StrimziPodSet for the Kafka cluster. When PodSets are disabled, it will try to delete the
 * old PodSet. If a previous replica count exists and is lower than the desired count, the old count is used,
 * because the scale-up happens only later in a separate reconciliation step.
 *
 * @return Future which completes when the PodSet is created, updated or deleted
 */
protected Future<Void> podSet() {
    if (featureGates.useStrimziPodSetsEnabled()) {
        // PodSets are enabled => create/update the StrimziPodSet for Kafka
        int replicas;

        if (currentReplicas != 0 && currentReplicas < kafka.getReplicas()) {
            // If there is a previous replica count and it is smaller than the desired replica count, we use the
            // previous one because the scale-up will happen only later during the reconciliation
            replicas = currentReplicas;
        } else {
            // If there is no previous number of replicas (because the PodSet did not exist) or if the previous
            // replica count is bigger than the desired one, we use the desired replicas (scale-down already happened)
            replicas = kafka.getReplicas();
        }

        StrimziPodSet kafkaPodSet = kafka.generatePodSet(replicas, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets,
                brokerId -> kafkaPodAnnotations(brokerId, false));

        return strimziPodSetOperator
                .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()), kafkaPodSet)
                .compose(rr -> {
                    podSetDiff = rr;
                    return Future.succeededFuture();
                });
    } else {
        // PodSets are disabled => delete the StrimziPodSet for Kafka
        return strimziPodSetOperator
                .getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()))
                .compose(podSet -> {
                    if (podSet != null) {
                        return strimziPodSetOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()), false);
                    } else {
                        return Future.succeededFuture();
                    }
                });
    }
}
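The replica-count branch above can be read as a small pure function. The following is only an illustrative restatement of that logic (the method name is made up), useful for reasoning about the ordering of scale-up and scale-down:

// During scale-up the PodSet is first reconciled with the old (smaller) count and grown later;
// for a new cluster or a scale-down, the desired count is used directly.
static int replicasForPodSet(int currentReplicas, int desiredReplicas) {
    return currentReplicas != 0 && currentReplicas < desiredReplicas ? currentReplicas : desiredReplicas;
}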
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi:
class StrimziPodSetController, method enqueuePod.
/**
* Checks if the Pod which should be enqueued belongs to a StrimziPodSet this controller manages and whether the
* Kafka cluster which owns it matches the CR selector. If it does, it will enqueue the reconciliation. This is used
* to enqueue reconciliations based on Pod events.
*
* Note: The reconciliation is enqueued per StrimziPodSet to which the pod belongs and not based on the Pod itself.
*
* @param pod Pod which should be checked and possibly enqueued
* @param action The action from the event which triggered this
*/
private void enqueuePod(Pod pod, String action) {
    LOGGER.debugOp("Pod {} in namespace {} was {}", pod.getMetadata().getName(), pod.getMetadata().getNamespace(), action);

    StrimziPodSet parentPodSet = strimziPodSetLister.namespace(pod.getMetadata().getNamespace())
            .list()
            .stream()
            .filter(podSet -> podSet.getSpec() != null && Util.matchesSelector(Optional.ofNullable(podSet.getSpec().getSelector()), pod))
            .findFirst()
            .orElse(null);

    if (parentPodSet != null) {
        if (matchesCrSelector(parentPodSet)) {
            enqueue(new SimplifiedReconciliation(parentPodSet.getMetadata().getNamespace(), parentPodSet.getMetadata().getName()));
        } else {
            LOGGER.debugOp("Pod {} in namespace {} was {} but does not belong to a Kafka cluster managed by this operator", pod.getMetadata().getName(), pod.getMetadata().getNamespace(), action);
        }
    } else {
        LOGGER.debugOp("Pod {} in namespace {} which was {} does not seem to be controlled by any StrimziPodSet and will be ignored", pod.getMetadata().getName(), pod.getMetadata().getNamespace(), action);
    }
}
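matchesCrSelector(...) is not shown in this snippet. A hedged sketch of how it could be implemented, assuming the controller also keeps a Lister for Kafka custom resources (kafkaLister) and an optional label selector configured on the operator (crSelector); both field names are assumptions, and the real implementation may differ:

// Sketch only: a PodSet is reconciled only if the Kafka cluster named in its strimzi.io/cluster
// label exists and matches the operator's custom-resource selector.
private boolean matchesCrSelector(StrimziPodSet podSet) {
    String kafkaName = podSet.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
    Kafka kafka = kafkaLister.namespace(podSet.getMetadata().getNamespace()).get(kafkaName);

    return kafka != null && Util.matchesSelector(crSelector, kafka);
}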
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi:
class StrimziPodSetController, method reconcile.
/**
 * The main reconciliation logic which handles a single enqueued reconciliation of a StrimziPodSet.
 *
 * @param reconciliation Reconciliation identifier used for logging
 */
private void reconcile(Reconciliation reconciliation) {
    String name = reconciliation.name();
    String namespace = reconciliation.namespace();
    StrimziPodSet podSet = strimziPodSetLister.namespace(namespace).get(name);

    if (podSet == null) {
        LOGGER.debugCr(reconciliation, "StrimziPodSet is null => nothing to do");
    } else if (!matchesCrSelector(podSet)) {
        LOGGER.debugCr(reconciliation, "StrimziPodSet doesn't match the selector => nothing to do");
    } else if (isDeleting(podSet)) {
        // When the PodSet is deleted, the pod deletion is done by Kubernetes Garbage Collection. When the PodSet
        // deletion is non-cascading, Kubernetes will remove the owner references. In order to avoid setting the
        // owner reference again, we need to check if the PodSet is being deleted and if it is, we leave it to
        // Kubernetes.
        LOGGER.infoCr(reconciliation, "StrimziPodSet is deleting => nothing to do");
    } else {
        LOGGER.infoCr(reconciliation, "StrimziPodSet will be reconciled");

        StrimziPodSetStatus status = new StrimziPodSetStatus();
        status.setObservedGeneration(podSet.getMetadata().getGeneration());

        try {
            // This has to:
            // 1) Create missing pods
            // 2) Modify changed pods if needed (patch owner reference)
            // 3) Delete scaled down pods

            // Will be used later to find out if any pod needs to be deleted
            Set<String> desiredPods = new HashSet<>(podSet.getSpec().getPods().size());

            PodCounter podCounter = new PodCounter();
            podCounter.pods = podSet.getSpec().getPods().size();

            for (Map<String, Object> desiredPod : podSet.getSpec().getPods()) {
                Pod pod = PodSetUtils.mapToPod(desiredPod);
                desiredPods.add(pod.getMetadata().getName());

                maybeCreateOrPatchPod(reconciliation, pod, ModelUtils.createOwnerReference(podSet), podCounter);
            }

            // Check if any pods need to be deleted
            removeDeletedPods(reconciliation, podSet.getSpec().getSelector(), desiredPods, podCounter);

            status.setPods(podCounter.pods);
            status.setReadyPods(podCounter.readyPods);
            status.setCurrentPods(podCounter.currentPods);
        } catch (Exception e) {
            LOGGER.errorCr(reconciliation, "StrimziPodSet {} in namespace {} reconciliation failed", reconciliation.name(), reconciliation.namespace(), e);
            status.addCondition(StatusUtils.buildConditionFromException("Error", "true", e));
        } finally {
            maybeUpdateStatus(reconciliation, podSet, status);
            LOGGER.infoCr(reconciliation, "reconciled");
        }
    }
}
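PodCounter is a small helper used to collect the status counters during the loop above. Its field names follow from the accesses in reconcile(); the comments describing each counter are an interpretation, not the original documentation:

// Sketch only: a mutable holder whose fields are updated by maybeCreateOrPatchPod(...) and
// removeDeletedPods(...) and copied into the StrimziPodSetStatus at the end of the reconciliation.
static class PodCounter {
    int pods;          // number of pods the PodSet declares (desired pods)
    int currentPods;   // pods that already exist and match their desired definition
    int readyPods;     // pods that exist and report Ready
}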