Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class StrimziPodSetCrdOperatorIT, method testUpdateStatusAfterResourceUpdated.
/**
 * Tests what happens when the resource is modified while its status is being updated.
 *
 * @param context   Test context
 */
@Test
public void testUpdateStatusAfterResourceUpdated(VertxTestContext context) {
    String resourceName = getResourceName(RESOURCE_NAME);
    Checkpoint async = context.checkpoint();
    String namespace = getNamespace();
    StrimziPodSetOperator op = operator();
    Promise<Void> updateStatus = Promise.promise();

    // Required to be able to create the resource
    readinessHelper(op, namespace, resourceName);

    LOGGER.info("Creating resource");
    op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName))
            .onComplete(context.succeedingThenComplete())
            .compose(rrCreated -> {
                StrimziPodSet updated = getResourceWithModifications(rrCreated.resource());
                StrimziPodSet newStatus = getResourceWithNewReadyStatus(rrCreated.resource());

                LOGGER.info("Updating resource (mocking an update due to some other reason)");
                op.operation().inNamespace(namespace).withName(resourceName).patch(updated);

                LOGGER.info("Updating resource status after underlying resource has changed");
                return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus);
            })
            .onComplete(context.succeeding(res -> context.verify(() -> {
                assertThat(res.getMetadata().getName(), Matchers.is(resourceName));
                assertThat(res.getMetadata().getNamespace(), Matchers.is(namespace));
                updateStatus.complete();
            })));

    updateStatus.future()
            .compose(v -> {
                LOGGER.info("Deleting resource");
                return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null);
            })
            .onComplete(context.succeeding(v -> async.flag()));
}
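The helpers used above (getResource, getResourceWithModifications, getResourceWithNewReadyStatus) are defined elsewhere in the test class and are not shown in this snippet. Below is a minimal sketch of what getResourceWithNewReadyStatus might look like, assuming the status is marked ready via a standard Strimzi Condition; the method body and the condition values are assumptions, only the helper name comes from the snippet.
// Hypothetical sketch: returns a copy of the StrimziPodSet with a Ready condition in its status.
// The condition type "Ready" and status "True" are assumptions based on common Strimzi conventions.
private StrimziPodSet getResourceWithNewReadyStatus(StrimziPodSet resource) {
    StrimziPodSetStatus status = new StrimziPodSetStatus();
    status.setConditions(List.of(new ConditionBuilder()
            .withType("Ready")
            .withStatus("True")
            .build()));

    return new StrimziPodSetBuilder(resource)
            .withStatus(status)
            .build();
}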
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi by strimzi.
The class StrimziPodSetCrdOperatorIT, method testUpdateStatus.
@Test
public void testUpdateStatus(VertxTestContext context) {
    String resourceName = getResourceName(RESOURCE_NAME);
    Checkpoint async = context.checkpoint();
    String namespace = getNamespace();
    StrimziPodSetOperator op = operator();

    // Required to be able to create the resource
    readinessHelper(op, namespace, resourceName);

    LOGGER.info("Creating resource");
    op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName))
            .onComplete(context.succeedingThenComplete())
            .compose(rrCreated -> {
                StrimziPodSet newStatus = getResourceWithNewReadyStatus(rrCreated.resource());

                LOGGER.info("Updating resource status");
                return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus);
            })
            .onComplete(context.succeedingThenComplete())
            .compose(rrModified -> op.getAsync(namespace, resourceName))
            .onComplete(context.succeeding(modifiedCustomResource -> context.verify(() -> {
                assertReady(context, modifiedCustomResource);
            })))
            .compose(rrModified -> {
                LOGGER.info("Deleting resource");
                return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null);
            })
            .onComplete(context.succeeding(rrDeleted -> async.flag()));
}
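The assertReady helper is also defined elsewhere in the test class. A plausible sketch, assuming it simply checks for the Ready condition set by getResourceWithNewReadyStatus; the body and the condition values are assumptions, not the project's actual implementation.
// Hypothetical sketch of the assertReady helper: verifies that the resource carries a Ready=True condition.
protected void assertReady(VertxTestContext context, StrimziPodSet resource) {
    context.verify(() -> assertThat(
            resource.getStatus().getConditions().stream()
                    .anyMatch(c -> "Ready".equals(c.getType()) && "True".equals(c.getStatus())),
            Matchers.is(true)));
}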
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class KafkaReconciler, method scaleDown.
/**
* Scales down the Kafka cluster if needed. Kafka scale-down is done in one go.
*
* @return Future which completes when the scale-down is finished
*/
protected Future<Void> scaleDown() {
    if (currentReplicas != 0 && currentReplicas > kafka.getReplicas()) {
        // The previous (current) number of replicas is bigger than desired => we should scale down
        LOGGER.infoCr(reconciliation, "Scaling Kafka down from {} to {} replicas", currentReplicas, kafka.getReplicas());

        if (featureGates.useStrimziPodSetsEnabled()) {
            Set<String> desiredPodNames = new HashSet<>(kafka.getReplicas());
            for (int i = 0; i < kafka.getReplicas(); i++) {
                desiredPodNames.add(kafka.getPodName(i));
            }

            return strimziPodSetOperator.getAsync(reconciliation.namespace(), kafka.getName())
                    .compose(podSet -> {
                        if (podSet == null) {
                            return Future.succeededFuture();
                        } else {
                            List<Map<String, Object>> desiredPods = podSet.getSpec().getPods().stream()
                                    .filter(pod -> desiredPodNames.contains(PodSetUtils.mapToPod(pod).getMetadata().getName()))
                                    .collect(Collectors.toList());

                            StrimziPodSet scaledDownPodSet = new StrimziPodSetBuilder(podSet)
                                    .editSpec()
                                        .withPods(desiredPods)
                                    .endSpec()
                                    .build();

                            return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), kafka.getName(), scaledDownPodSet)
                                    .map((Void) null);
                        }
                    });
        } else {
            return stsOperator.scaleDown(reconciliation, reconciliation.namespace(), kafka.getName(), kafka.getReplicas())
                    .map((Void) null);
        }
    } else {
        // The current number of replicas is unknown or not bigger than desired => no need to scale down
        return Future.succeededFuture();
    }
}
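Note that the scale-down never deletes Pods directly: it only shrinks the spec.pods list of the StrimziPodSet, and the StrimziPodSet controller then removes the surplus Pods. A minimal sketch of the filtering step in isolation, assuming a cluster being scaled from five to three brokers; the concrete pod names follow the usual <cluster>-kafka-<index> pattern and are illustrative only.
// Illustrative only: keep the first three broker pods of a PodSet and drop the rest.
Set<String> desiredPodNames = Set.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2");

List<Map<String, Object>> desiredPods = podSet.getSpec().getPods().stream()
        .filter(pod -> desiredPodNames.contains(PodSetUtils.mapToPod(pod).getMetadata().getName()))
        .collect(Collectors.toList());

StrimziPodSet scaledDownPodSet = new StrimziPodSetBuilder(podSet)
        .editSpec()
            .withPods(desiredPods)    // pods -3 and -4 are no longer listed and will be removed by the controller
        .endSpec()
        .build();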
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class ManualPodCleaner, method cleanPodPvcAndPodSet.
/**
 * Handles the modification of the StrimziPodSet controlling the pod which should be cleaned. In order
 * to clean the pod and its PVCs, we first need to remove the pod from the StrimziPodSet. Otherwise, the
 * StrimziPodSet controller would break the process by recreating the Pod or its PVCs. This method first modifies
 * the StrimziPodSet and then calls another method to delete the Pod and its PVCs and to create the new PVCs.
 * Once that completes, this method updates the StrimziPodSet again. The Pod will then be recreated by the
 * StrimziPodSet controller and this method just waits for it to become ready.
 *
 * The complete flow looks like this:
 * 1. Remove the pod which should be deleted from the PodSet
 * 2. Trigger the Pod and PVC deletion and recreation
 * 3. Recreate the original PodSet
 * 4. Wait for the Pod to be created and become ready
 *
 * @param podSetName   Name of the StrimziPodSet to which this pod belongs
 * @param podName      Name of the Pod which should be cleaned / deleted
 * @param desiredPvcs  The list of desired PVCs which should be created after the old Pod and PVCs are deleted
 * @param currentPvcs  The list of current PVCs which should be deleted
 *
 * @return Future indicating the result of the cleanup
 */
private Future<Void> cleanPodPvcAndPodSet(String podSetName, String podName, List<PersistentVolumeClaim> desiredPvcs, List<PersistentVolumeClaim> currentPvcs) {
    return strimziPodSetOperator.getAsync(reconciliation.namespace(), podSetName)
            .compose(podSet -> {
                List<Map<String, Object>> desiredPods = podSet.getSpec().getPods().stream()
                        .filter(pod -> !podName.equals(PodSetUtils.mapToPod(pod).getMetadata().getName()))
                        .collect(Collectors.toList());

                StrimziPodSet reducedPodSet = new StrimziPodSetBuilder(podSet)
                        .editSpec()
                            .withPods(desiredPods)
                        .endSpec()
                        .build();

                return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), podSetName, reducedPodSet)
                        .compose(ignore -> cleanPodAndPvc(podName, desiredPvcs, currentPvcs))
                        .compose(ignore -> {
                            // We recreate the StrimziPodSet in its old configuration => any further changes have to be done by rolling update
                            // These fields need to be cleared before recreating the StrimziPodSet
                            podSet.getMetadata().setResourceVersion(null);
                            podSet.getMetadata().setSelfLink(null);
                            podSet.getMetadata().setUid(null);
                            podSet.setStatus(null);

                            return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), podSetName, podSet);
                        })
                        .compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podName, 1_000L, operationTimeoutMs))
                        .map((Void) null);
            });
}
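A hypothetical call site for this private method, assuming the caller knows the Kafka cluster name and the index of the broker to clean; the use of the KafkaResources naming helper and the logging calls are illustrative, not taken from the project's actual caller.
// Illustrative invocation: clean broker 1 of cluster "my-cluster".
// KafkaResources.kafkaStatefulSetName(...) yields "my-cluster-kafka", which in this code also names the PodSet.
String podSetName = KafkaResources.kafkaStatefulSetName("my-cluster");   // "my-cluster-kafka"
String podName = podSetName + "-1";                                      // "my-cluster-kafka-1"

cleanPodPvcAndPodSet(podSetName, podName, desiredPvcs, currentPvcs)
        .onSuccess(v -> LOGGER.infoCr(reconciliation, "Pod {} was cleaned and recreated", podName))
        .onFailure(e -> LOGGER.errorCr(reconciliation, "Failed to clean pod {}", podName, e));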
Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.
The class StrimziPodSetController, method maybeUpdateStatus.
/**
 * Updates the status of the StrimziPodSet. The status is updated only when it has changed since the last update.
 *
 * @param reconciliation    Reconciliation in which this is executed
 * @param podSet            Original pod set with the current status
 * @param desiredStatus     The desired status which should be set if it differs
 */
private void maybeUpdateStatus(Reconciliation reconciliation, StrimziPodSet podSet, StrimziPodSetStatus desiredStatus) {
    if (!new StatusDiff(podSet.getStatus(), desiredStatus).isEmpty()) {
        try {
            LOGGER.debugCr(reconciliation, "Updating status of StrimziPodSet {} in namespace {}", reconciliation.name(), reconciliation.namespace());
            StrimziPodSet latestPodSet = strimziPodSetLister.namespace(reconciliation.namespace()).get(reconciliation.name());
            if (latestPodSet != null) {
                StrimziPodSet updatedPodSet = new StrimziPodSetBuilder(latestPodSet)
                        .withStatus(desiredStatus)
                        .build();
                strimziPodSetOperator.client().inNamespace(reconciliation.namespace()).withName(reconciliation.name()).patchStatus(updatedPodSet);
            }
        } catch (KubernetesClientException e) {
            if (e.getCode() == 409) {
                LOGGER.debugCr(reconciliation, "StrimziPodSet {} in namespace {} changed while trying to update status", reconciliation.name(), reconciliation.namespace());
            } else if (e.getCode() == 404) {
                LOGGER.debugCr(reconciliation, "StrimziPodSet {} in namespace {} was deleted while trying to update status", reconciliation.name(), reconciliation.namespace());
            } else {
                LOGGER.errorCr(reconciliation, "Failed to update status of StrimziPodSet {} in namespace {}", reconciliation.name(), reconciliation.namespace(), e);
            }
        }
    }
}
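For context, a minimal sketch of how a caller might assemble the desired status before invoking maybeUpdateStatus; the counter field names (pods, currentPods, readyPods) are assumptions based on the StrimziPodSetStatus model, so check the generated model class for the exact setters.
// Hypothetical sketch: build the desired status and let maybeUpdateStatus decide whether a patch is needed.
StrimziPodSetStatus desiredStatus = new StrimziPodSetStatus();
desiredStatus.setPods(3);        // number of pods defined in spec.pods
desiredStatus.setCurrentPods(3); // pods currently managed by the controller
desiredStatus.setReadyPods(2);   // pods passing their readiness checks

maybeUpdateStatus(reconciliation, podSet, desiredStatus);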