Use of io.strimzi.api.kafka.model.StrimziPodSet in the strimzi project: the testKafkaListenerNodePortAddressSameNode method of the KafkaStatusTest class.
/**
 * Verifies that when all broker pods of a node-port listener are scheduled to the same node,
 * the Kafka status lists that node's address only once (de-duplicated) with the node port.
 */
@Test
public void testKafkaListenerNodePortAddressSameNode(VertxTestContext context) throws ParseException {
    // Kafka CR with a single TLS node-port listener named "external" on port 9094
    Kafka kafka = new KafkaBuilder(getKafkaCrd())
            .editOrNewSpec()
                .editOrNewKafka()
                    .withListeners(new GenericKafkaListenerBuilder()
                            .withName("external")
                            .withPort(9094)
                            .withType(KafkaListenerType.NODEPORT)
                            .withTls(true)
                            .build())
                .endKafka()
            .endSpec()
            .build();
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Mock the CRD Operator for Kafka resources and capture the status update for later assertions
    CrdOperator mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka));
    when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka);
    ArgumentCaptor<Kafka> kafkaCaptor = ArgumentCaptor.forClass(Kafka.class);
    when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture());

    // Mock the StatefulSet operator
    StatefulSetOperator mockStsOps = supplier.stsOperations;
    when(mockStsOps.getAsync(eq(namespace), eq(KafkaCluster.kafkaClusterName(clusterName)))).thenReturn(Future.succeededFuture(kafkaCluster.generateStatefulSet(false, null, null, null)));

    // Mock the StrimziPodSet operator (no PodSet exists in this scenario)
    CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
    when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));

    // Mock the ConfigMapOperator
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    when(mockCmOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafkaCluster.generateMetricsAndLogConfigMap(new MetricsAndLogging(null, null))));

    // Mock the Pod operator: all three broker pods run on the same node (identical host IP)
    Pod pod0 = new PodBuilder().withNewMetadata().withName(clusterName + "-kafka-" + 0).endMetadata().withNewStatus().withHostIP("10.0.0.1").endStatus().build();
    Pod pod1 = new PodBuilder().withNewMetadata().withName(clusterName + "-kafka-" + 1).endMetadata().withNewStatus().withHostIP("10.0.0.1").endStatus().build();
    Pod pod2 = new PodBuilder().withNewMetadata().withName(clusterName + "-kafka-" + 2).endMetadata().withNewStatus().withHostIP("10.0.0.1").endStatus().build();
    List<Pod> pods = new ArrayList<>();
    pods.add(pod0);
    pods.add(pod1);
    pods.add(pod2);
    PodOperator mockPodOps = supplier.podOperations;
    when(mockPodOps.listAsync(eq(namespace), any(Labels.class))).thenReturn(Future.succeededFuture(pods));

    // Mock the Node operator
    NodeOperator mockNodeOps = supplier.nodeOperator;
    when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(getClusterNodes()));

    MockNodePortStatusKafkaAssemblyOperator kao = new MockNodePortStatusKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, passwordGenerator, supplier, config);

    Checkpoint async = context.checkpoint();
    // context.verify(...) makes assertion failures inside the async handler fail the test
    // immediately instead of being swallowed by the Vert.x future handler (which would
    // otherwise surface only as a test timeout)
    kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)).onComplete(res -> context.verify(() -> {
        assertThat(res.succeeded(), is(true));
        assertThat(kafkaCaptor.getValue(), is(notNullValue()));
        assertThat(kafkaCaptor.getValue().getStatus(), is(notNullValue()));
        KafkaStatus status = kafkaCaptor.getValue().getStatus();

        assertThat(status.getListeners().size(), is(1));
        assertThat(status.getListeners().get(0).getType(), is("external"));
        assertThat(status.getListeners().get(0).getName(), is("external"));

        List<ListenerAddress> addresses = status.getListeners().get(0).getAddresses();
        assertThat(addresses.size(), is(1));
        List<ListenerAddress> expected = new ArrayList<>();
        expected.add(new ListenerAddressBuilder().withHost("50.35.18.119").withPort(31234).build());
        // Fix: the expected list was previously constructed but never asserted against
        assertThat(addresses, is(expected));

        async.flag();
    }));
}
Use of io.strimzi.api.kafka.model.StrimziPodSet in the strimzi project: the podSet method of the KafkaReconciler class.
/**
 * Creates or updates the StrimziPodSet for the Kafka cluster. When the PodSet feature gate is
 * disabled, any existing PodSet is deleted instead. While a scale-up is pending, the previous
 * replica count is kept because the actual scale-up happens only in a later reconciliation step.
 *
 * @return Future which completes once the PodSet has been created, updated or deleted
 */
protected Future<Void> podSet() {
    String podSetName = KafkaResources.kafkaStatefulSetName(reconciliation.name());

    if (!featureGates.useStrimziPodSetsEnabled()) {
        // PodSets are disabled => remove the StrimziPodSet for Kafka if one exists
        return strimziPodSetOperator.getAsync(reconciliation.namespace(), podSetName).compose(existingPodSet -> {
            if (existingPodSet != null) {
                return strimziPodSetOperator.deleteAsync(reconciliation, reconciliation.namespace(), podSetName, false);
            } else {
                return Future.succeededFuture();
            }
        });
    }

    // PodSets are enabled => create/update the StrimziPodSet for Kafka
    int replicas;
    if (currentReplicas != 0 && currentReplicas < kafka.getReplicas()) {
        // A previous replica count exists and is smaller than desired => keep the previous
        // count, because the scale-up runs only later during the reconciliation
        replicas = currentReplicas;
    } else {
        // No previous replica count (the PodSet did not exist) or the previous count is
        // larger than desired (the scale-down already happened) => use the desired count
        replicas = kafka.getReplicas();
    }

    StrimziPodSet kafkaPodSet = kafka.generatePodSet(replicas, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, brokerId -> kafkaPodAnnotations(brokerId, false));
    return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), podSetName, kafkaPodSet).compose(reconcileResult -> {
        podSetDiff = reconcileResult;
        return Future.succeededFuture();
    });
}
Use of io.strimzi.api.kafka.model.StrimziPodSet in the strimzi project: the scaleDown method of the KafkaReconciler class.
/**
 * Scales the Kafka cluster down when the desired replica count is lower than the current one.
 * The Kafka scale-down is performed in a single step.
 *
 * @return Future which completes once any required scale-down has finished
 */
protected Future<Void> scaleDown() {
    boolean scaleDownNeeded = currentReplicas != 0 && currentReplicas > kafka.getReplicas();
    if (!scaleDownNeeded) {
        // Current replicas are unknown or already at/below the desired count => nothing to do
        return Future.succeededFuture();
    }

    LOGGER.infoCr(reconciliation, "Scaling Kafka down from {} to {} replicas", currentReplicas, kafka.getReplicas());

    if (!featureGates.useStrimziPodSetsEnabled()) {
        // StatefulSets are in use => delegate the scale-down to the StatefulSet operator
        return stsOperator.scaleDown(reconciliation, reconciliation.namespace(), kafka.getName(), kafka.getReplicas()).map((Void) null);
    }

    // PodSets are in use => rewrite the PodSet so that only the pods which should remain are kept
    Set<String> retainedPodNames = new HashSet<>(kafka.getReplicas());
    for (int podIndex = 0; podIndex < kafka.getReplicas(); podIndex++) {
        retainedPodNames.add(kafka.getPodName(podIndex));
    }

    return strimziPodSetOperator.getAsync(reconciliation.namespace(), kafka.getName()).compose(podSet -> {
        if (podSet == null) {
            // No PodSet exists, so there is nothing to scale down
            return Future.succeededFuture();
        }

        List<Map<String, Object>> retainedPods = podSet.getSpec().getPods().stream()
                .filter(pod -> retainedPodNames.contains(PodSetUtils.mapToPod(pod).getMetadata().getName()))
                .collect(Collectors.toList());
        StrimziPodSet scaledDownPodSet = new StrimziPodSetBuilder(podSet).editSpec().withPods(retainedPods).endSpec().build();
        return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), kafka.getName(), scaledDownPodSet).map((Void) null);
    });
}
Use of io.strimzi.api.kafka.model.StrimziPodSet in the strimzi project: the cleanPodPvcAndPodSet method of the ManualPodCleaner class.
/**
 * Handles the modification of the StrimziPodSet controlling the pod which should be cleaned. In order
 * to clean the pod and its PVCs, we first need to remove the pod from the StrimziPodSet. Otherwise, the
 * StrimziPodSet will break the process by recreating the pods or PVCs. This method first modifies the StrimziPodSet
 * and then calls another method to delete the Pod, PVCs and create the new PVCs. Once this method completes, it
 * will update the StrimziPodSet again. The Pod will be then recreated by the StrimziPodSet and this method just
 * waits for it to become ready.
 *
 * The complete flow looks like this
 *   1. Remove the deleted pod from the PodSet
 *   2. Trigger the Pod and PVC deletion and recreation
 *   3. Recreate the original PodSet
 *   4. Wait for the Pod to be created and become ready
 *
 * @param podSetName    Name of the StrimziPodSet to which this pod belongs
 * @param podName       Name of the Pod which should be cleaned / deleted
 * @param desiredPvcs   The list of desired PVCs which should be created after the old Pod and PVCs are deleted
 * @param currentPvcs   The list of current PVCs which should be deleted
 *
 * @return              Future indicating the result of the cleanup
 */
private Future<Void> cleanPodPvcAndPodSet(String podSetName, String podName, List<PersistentVolumeClaim> desiredPvcs, List<PersistentVolumeClaim> currentPvcs) {
    return strimziPodSetOperator.getAsync(reconciliation.namespace(), podSetName).compose(podSet -> {
        // Step 1: build the pod list without the pod being cleaned, so the PodSet stops recreating it
        List<Map<String, Object>> desiredPods = podSet.getSpec().getPods().stream().filter(pod -> !podName.equals(PodSetUtils.mapToPod(pod).getMetadata().getName())).collect(Collectors.toList());
        StrimziPodSet reducedPodSet = new StrimziPodSetBuilder(podSet).editSpec().withPods(desiredPods).endSpec().build();

        return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), podSetName, reducedPodSet)
                // Step 2: with the pod removed from the PodSet, delete the Pod and swap its PVCs
                .compose(ignore -> cleanPodAndPvc(podName, desiredPvcs, currentPvcs))
                .compose(ignore -> {
                    // Step 3: we recreate the StrimziPodSet in its old configuration => any further changes have to be done by rolling update
                    // These fields need to be cleared before recreating the StrimziPodSet
                    podSet.getMetadata().setResourceVersion(null);
                    podSet.getMetadata().setSelfLink(null);
                    podSet.getMetadata().setUid(null);
                    podSet.setStatus(null);

                    return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), podSetName, podSet);
                })
                // Step 4: the restored PodSet recreates the Pod; wait for it to become ready
                .compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podName, 1_000L, operationTimeoutMs))
                .map((Void) null);
    });
}
Use of io.strimzi.api.kafka.model.StrimziPodSet in the strimzi project: the enqueuePod method of the StrimziPodSetController class.
/**
 * Checks whether a Pod belongs to a StrimziPodSet managed by this controller and, if the Kafka
 * cluster owning it matches the CR selector, enqueues a reconciliation. This is how Pod events
 * feed the reconciliation queue.
 *
 * Note: The reconciliation is enqueued for the StrimziPodSet owning the pod, not for the Pod itself.
 *
 * @param pod       Pod which should be checked and possibly enqueued
 * @param action    The action from the event which triggered this
 */
private void enqueuePod(Pod pod, String action) {
    String podName = pod.getMetadata().getName();
    String podNamespace = pod.getMetadata().getNamespace();
    LOGGER.debugOp("Pod {} in namespace {} was {}", podName, podNamespace, action);

    // Find the first PodSet in this namespace whose selector matches the pod
    Optional<StrimziPodSet> parentPodSet = strimziPodSetLister.namespace(podNamespace).list().stream()
            .filter(podSet -> podSet.getSpec() != null
                    && Util.matchesSelector(Optional.ofNullable(podSet.getSpec().getSelector()), pod))
            .findFirst();

    if (parentPodSet.isEmpty()) {
        // No owning PodSet => nothing to reconcile for this pod
        LOGGER.debugOp("Pod {} in namespace {} which was {} does not seem to be controlled by any StrimziPodSet and will be ignored", podName, podNamespace, action);
    } else if (matchesCrSelector(parentPodSet.get())) {
        StrimziPodSet owner = parentPodSet.get();
        enqueue(new SimplifiedReconciliation(owner.getMetadata().getNamespace(), owner.getMetadata().getName()));
    } else {
        // Owned by a PodSet, but its Kafka cluster is not managed by this operator instance
        LOGGER.debugOp("Pod {} in namespace {} was {} but does not belong to a Kafka cluster managed by this operator", podName, podNamespace, action);
    }
}
Aggregations