Example usage of io.strimzi.api.kafka.model.Kafka in the Strimzi project — method KafkaUtils.waitForKafkaStatusUpdate:
/**
* Waits for the Kafka Status to be updated after changed. It checks the generation and observed generation to
* ensure the status is up to date.
*
* @param namespaceName Namespace name
* @param clusterName Name of the Kafka cluster which should be checked
*/
/**
 * Waits until the Kafka custom resource status is up to date with its spec, i.e. until
 * {@code metadata.generation} equals {@code status.observedGeneration}.
 *
 * @param namespaceName Namespace in which the Kafka cluster resides
 * @param clusterName   Name of the Kafka custom resource to check
 */
public static void waitForKafkaStatusUpdate(String namespaceName, String clusterName) {
    LOGGER.info("Waiting for Kafka status to be updated");
    TestUtils.waitFor("KafkaStatus update", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> {
        Kafka k = KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).get();
        // The resource may be momentarily missing and its status may not be populated yet
        // (e.g. right after creation) — treat both cases as "not updated yet" instead of
        // throwing an NPE inside the poll lambda.
        return k != null && k.getStatus() != null
                && k.getMetadata().getGeneration() == k.getStatus().getObservedGeneration();
    });
}
Example usage of io.strimzi.api.kafka.model.Kafka in the Strimzi project — method KafkaCluster.generateJmxSecret:
/**
* Generate the Secret containing the username and password to secure the jmx port on the Kafka brokers.
*
* @param currentSecret The existing Secret with the current JMX credentials. Null if no secret exists yet.
*
* @return The generated Secret
*/
/**
 * Generate the Secret containing the username and password to secure the JMX port on the
 * Kafka brokers. Existing credentials in {@code currentSecret} are preserved; missing
 * entries are filled in with freshly generated values.
 *
 * @param currentSecret The existing Secret with the current JMX credentials. Null if no secret exists yet.
 *
 * @return The generated Secret, or null when JMX authentication is not enabled
 */
public Secret generateJmxSecret(Secret currentSecret) {
    if (isJmxAuthenticated) {
        PasswordGenerator passwordGenerator = new PasswordGenerator(16);
        Map<String, String> data = new HashMap<>(2);
        if (currentSecret != null && currentSecret.getData() != null) {
            // Read-only lookups here: the previous version used computeIfAbsent, which
            // mutated the caller's Secret data map as a side effect and would throw
            // UnsupportedOperationException on an unmodifiable map.
            Map<String, String> currentData = currentSecret.getData();
            String username = currentData.get(SECRET_JMX_USERNAME_KEY);
            String password = currentData.get(SECRET_JMX_PASSWORD_KEY);
            data.put(SECRET_JMX_USERNAME_KEY, username != null ? username : Util.encodeToBase64(passwordGenerator.generate()));
            data.put(SECRET_JMX_PASSWORD_KEY, password != null ? password : Util.encodeToBase64(passwordGenerator.generate()));
        } else {
            // No prior secret — generate a fresh credential pair.
            data.put(SECRET_JMX_USERNAME_KEY, Util.encodeToBase64(passwordGenerator.generate()));
            data.put(SECRET_JMX_PASSWORD_KEY, Util.encodeToBase64(passwordGenerator.generate()));
        }
        return createJmxSecret(KafkaResources.kafkaJmxSecretName(cluster), data);
    } else {
        return null;
    }
}
Example usage of io.strimzi.api.kafka.model.Kafka in the Strimzi project — method KafkaReconciler.scaleDown:
/**
* Scales down the Kafka cluster if needed. Kafka scale-down is done in one go.
*
* @return Future which completes when the scale-down is finished
*/
/**
 * Scales down the Kafka cluster if needed. Kafka scale-down is done in one go.
 *
 * When StrimziPodSets are enabled, scale-down is achieved by reconciling a copy of the
 * current pod set whose pod list is filtered to only the desired pod names. Otherwise the
 * StatefulSet operator performs the scale-down directly.
 *
 * @return Future which completes when the scale-down is finished
 */
protected Future<Void> scaleDown() {
    // currentReplicas == 0 means there is no previous deployment to shrink;
    // otherwise scale down only when the previous replica count exceeds the desired one.
    if (currentReplicas != 0 && currentReplicas > kafka.getReplicas()) {
        // The previous (current) number of replicas is bigger than desired => we should scale-down
        LOGGER.infoCr(reconciliation, "Scaling Kafka down from {} to {} replicas", currentReplicas, kafka.getReplicas());
        if (featureGates.useStrimziPodSetsEnabled()) {
            // Build the set of pod names that should survive the scale-down (indices 0..replicas-1).
            Set<String> desiredPodNames = new HashSet<>(kafka.getReplicas());
            for (int i = 0; i < kafka.getReplicas(); i++) {
                desiredPodNames.add(kafka.getPodName(i));
            }
            return strimziPodSetOperator.getAsync(reconciliation.namespace(), kafka.getName()).compose(podSet -> {
                if (podSet == null) {
                    // Nothing to scale down — the pod set does not exist (yet).
                    return Future.succeededFuture();
                } else {
                    // Keep only the pods whose names are in the desired set, then reconcile
                    // the trimmed pod set; the operator removes the excess pods.
                    List<Map<String, Object>> desiredPods = podSet.getSpec().getPods().stream().filter(pod -> desiredPodNames.contains(PodSetUtils.mapToPod(pod).getMetadata().getName())).collect(Collectors.toList());
                    StrimziPodSet scaledDownPodSet = new StrimziPodSetBuilder(podSet).editSpec().withPods(desiredPods).endSpec().build();
                    return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), kafka.getName(), scaledDownPodSet).map((Void) null);
                }
            });
        } else {
            // StatefulSet-based deployment: delegate the scale-down to the STS operator.
            return stsOperator.scaleDown(reconciliation, reconciliation.namespace(), kafka.getName(), kafka.getReplicas()).map((Void) null);
        }
    } else {
        // desired replicas => no need to scale-down
        return Future.succeededFuture();
    }
}
Example usage of io.strimzi.api.kafka.model.Kafka in the Strimzi project — test EntityOperatorTest.testImagePullSecretsFromCo:
@ParallelTest
@ParallelTest
public void testImagePullSecretsFromCo() {
    // Two image pull secrets supplied at the Cluster Operator level.
    LocalObjectReference pullSecretOne = new LocalObjectReference("some-pull-secret");
    LocalObjectReference pullSecretTwo = new LocalObjectReference("some-other-pull-secret");
    List<LocalObjectReference> pullSecrets = new ArrayList<>(2);
    pullSecrets.add(pullSecretOne);
    pullSecrets.add(pullSecretTwo);

    // Kafka CR with an Entity Operator configured with both Topic and User operators.
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withNewEntityOperator()
                    .withTopicOperator(entityTopicOperatorSpec)
                    .withUserOperator(entityUserOperatorSpec)
                .endEntityOperator()
            .endSpec()
            .build();
    EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS, true);

    // The generated Deployment must carry exactly the CO-provided pull secrets.
    Deployment dep = eo.generateDeployment(true, null, pullSecrets);
    List<LocalObjectReference> actualPullSecrets = dep.getSpec().getTemplate().getSpec().getImagePullSecrets();
    assertThat(actualPullSecrets.size(), is(2));
    assertThat(actualPullSecrets.contains(pullSecretOne), is(true));
    assertThat(actualPullSecrets.contains(pullSecretTwo), is(true));
}
Example usage of io.strimzi.api.kafka.model.Kafka in the Strimzi project — test EntityOperatorTest.testFromCrdNoUserOperatorInEntityOperator:
@ParallelTest
@ParallelTest
public void testFromCrdNoUserOperatorInEntityOperator() {
    // Entity Operator spec with only a Topic Operator — no User Operator section.
    EntityOperatorSpec eoSpec = new EntityOperatorSpecBuilder()
            .withNewTopicOperator()
            .endTopicOperator()
            .build();
    Kafka kafkaCr = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout))
            .editSpec()
                .withEntityOperator(eoSpec)
            .endSpec()
            .build();

    Reconciliation reconciliation = new Reconciliation("test", kafkaCr.getKind(), kafkaCr.getMetadata().getNamespace(), kafkaCr.getMetadata().getName());
    EntityOperator entityOperator = EntityOperator.fromCrd(reconciliation, kafkaCr, VERSIONS, true);

    // Only the Topic Operator model should be instantiated.
    assertThat(entityOperator.topicOperator(), is(notNullValue()));
    assertThat(entityOperator.userOperator(), is(nullValue()));
}
End of aggregated usage examples.