Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
The class Main, method maybeCreateClusterRoles.
/*test*/
static Future<Void> maybeCreateClusterRoles(Vertx vertx, ClusterOperatorConfig config, KubernetesClient client) {
if (config.isCreateClusterRoles()) {
@SuppressWarnings({ "rawtypes" }) List<Future> futures = new ArrayList<>();
ClusterRoleOperator cro = new ClusterRoleOperator(vertx, client);
Map<String, String> clusterRoles = new HashMap<>(6);
clusterRoles.put("strimzi-cluster-operator-namespaced", "020-ClusterRole-strimzi-cluster-operator-role.yaml");
clusterRoles.put("strimzi-cluster-operator-global", "021-ClusterRole-strimzi-cluster-operator-role.yaml");
clusterRoles.put("strimzi-kafka-broker", "030-ClusterRole-strimzi-kafka-broker.yaml");
clusterRoles.put("strimzi-entity-operator", "031-ClusterRole-strimzi-entity-operator.yaml");
clusterRoles.put("strimzi-kafka-client", "033-ClusterRole-strimzi-kafka-client.yaml");
for (Map.Entry<String, String> clusterRole : clusterRoles.entrySet()) {
LOGGER.info("Creating cluster role {}", clusterRole.getKey());
try (BufferedReader br = new BufferedReader(new InputStreamReader(Main.class.getResourceAsStream("/cluster-roles/" + clusterRole.getValue()), StandardCharsets.UTF_8))) {
String yaml = br.lines().collect(Collectors.joining(System.lineSeparator()));
ClusterRole role = ClusterRoleOperator.convertYamlToClusterRole(yaml);
@SuppressWarnings({ "rawtypes" }) Future fut = cro.reconcile(new Reconciliation("start-cluster-operator", "Deployment", config.getOperatorNamespace(), "cluster-operator"), role.getMetadata().getName(), role);
futures.add(fut);
} catch (IOException e) {
LOGGER.error("Failed to create Cluster Roles.", e);
throw new RuntimeException(e);
}
}
Promise<Void> returnPromise = Promise.promise();
CompositeFuture.all(futures).onComplete(res -> {
if (res.succeeded()) {
returnPromise.complete();
} else {
returnPromise.fail("Failed to create Cluster Roles.");
}
});
return returnPromise.future();
} else {
return Future.succeededFuture();
}
}
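A minimal sketch of a hypothetical call site (the wiring shown here is an assumption, not Strimzi's actual start-up sequence) illustrating why the method returns a Future<Void>: the operator should only continue starting once every ClusterRole has been reconciled. It assumes the same vertx, config, client and LOGGER as the Main class above.

Main.maybeCreateClusterRoles(vertx, config, client)
        .onComplete(res -> {
            if (res.succeeded()) {
                // Every ClusterRole reconcile() Future has completed; safe to continue start-up.
                LOGGER.info("ClusterRoles are in place, continuing operator start-up");
            } else {
                LOGGER.error("Could not prepare the ClusterRoles", res.cause());
            }
        });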
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
The class KafkaListenersReconciler, method ingresses.
/**
* Makes sure all desired ingresses are updated and the rest is deleted.
*
* @return Future which completes when all ingresses are created or deleted.
*/
protected Future<Void> ingresses() {
if (!pfa.hasIngressV1()) {
return Future.succeededFuture();
}
List<Ingress> ingresses = new ArrayList<>(kafka.generateExternalBootstrapIngresses());
int replicas = kafka.getReplicas();
for (int i = 0; i < replicas; i++) {
ingresses.addAll(kafka.generateExternalIngresses(i));
}
return ingressOperator.listAsync(reconciliation.namespace(), kafka.getSelectorLabels()).compose(existingIngresses -> {
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> ingressFutures = new ArrayList<>(ingresses.size());
List<String> existingIngressNames = existingIngresses.stream().map(ingress -> ingress.getMetadata().getName()).collect(Collectors.toList());
LOGGER.debugCr(reconciliation, "Reconciling existing Ingresses {} against the desired ingresses", existingIngressNames);
// Update desired ingresses
for (Ingress ingress : ingresses) {
String ingressName = ingress.getMetadata().getName();
existingIngressNames.remove(ingressName);
ingressFutures.add(ingressOperator.reconcile(reconciliation, reconciliation.namespace(), ingressName, ingress));
}
LOGGER.debugCr(reconciliation, "Ingresses {} should be deleted", existingIngressNames);
// Delete ingresses which match our selector but are not desired anymore
for (String ingressName : existingIngressNames) {
ingressFutures.add(ingressOperator.reconcile(reconciliation, reconciliation.namespace(), ingressName, null));
}
return CompositeFuture.join(ingressFutures).map((Void) null);
});
}
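The loop above is a simple diff: every desired Ingress is reconciled, and whatever the label selector matched but is no longer desired is reconciled with a null desired state, which the resource operator treats as a delete. A self-contained sketch of just that set arithmetic (plain Java, not Strimzi code; the method name is hypothetical):

import java.util.ArrayList;
import java.util.List;

// Names that matched the selector but are not desired any more; reconciling
// them with a null desired resource deletes them, as in the second loop above.
static List<String> leftoversToDelete(List<String> existingNames, List<String> desiredNames) {
    List<String> leftovers = new ArrayList<>(existingNames);
    leftovers.removeAll(desiredNames);
    return leftovers;
}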
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
The class KafkaListenersReconciler, method loadBalancerServicesReady.
/**
* Makes sure all services related to load balancers are ready and collects their addresses for Statuses,
* certificates and advertised addresses. This method for all Load Balancer type listeners:
* 1) Checks if the bootstrap service has been provisioned (has a loadbalancer address)
* 2) Collects the relevant addresses and stores them for use in certificates and in CR status
* 3) Checks if the broker services have been provisioned (have a loadbalancer address)
* 4) Collects the loadbalancer addresses for certificates and advertised hostnames
*
* @return Future which completes when all Load Balancer services are ready and their addresses are collected
*/
protected Future<Void> loadBalancerServicesReady() {
List<GenericKafkaListener> loadBalancerListeners = ListenersUtils.loadBalancerListeners(kafka.getListeners());
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> listenerFutures = new ArrayList<>(loadBalancerListeners.size());
for (GenericKafkaListener listener : loadBalancerListeners) {
String bootstrapServiceName = ListenersUtils.backwardsCompatibleBootstrapServiceName(reconciliation.name(), listener);
List<String> bootstrapListenerAddressList = new ArrayList<>(kafka.getReplicas());
Future<Void> perListenerFut = Future.succeededFuture().compose(i -> {
if (ListenersUtils.skipCreateBootstrapService(listener)) {
return Future.succeededFuture();
} else {
return serviceOperator.hasIngressAddress(reconciliation, reconciliation.namespace(), bootstrapServiceName, 1_000, operationTimeoutMs).compose(res -> serviceOperator.getAsync(reconciliation.namespace(), bootstrapServiceName)).compose(svc -> {
String bootstrapAddress;
if (svc.getStatus().getLoadBalancer().getIngress().get(0).getHostname() != null) {
bootstrapAddress = svc.getStatus().getLoadBalancer().getIngress().get(0).getHostname();
} else {
bootstrapAddress = svc.getStatus().getLoadBalancer().getIngress().get(0).getIp();
}
LOGGER.debugCr(reconciliation, "Found address {} for Service {}", bootstrapAddress, bootstrapServiceName);
result.bootstrapDnsNames.add(bootstrapAddress);
bootstrapListenerAddressList.add(bootstrapAddress);
return Future.succeededFuture();
});
}
}).compose(res -> {
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> perPodFutures = new ArrayList<>(kafka.getReplicas());
for (int pod = 0; pod < kafka.getReplicas(); pod++) {
perPodFutures.add(serviceOperator.hasIngressAddress(reconciliation, reconciliation.namespace(), ListenersUtils.backwardsCompatibleBrokerServiceName(reconciliation.name(), pod, listener), 1_000, operationTimeoutMs));
}
return CompositeFuture.join(perPodFutures);
}).compose(res -> {
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> perPodFutures = new ArrayList<>(kafka.getReplicas());
for (int brokerId = 0; brokerId < kafka.getReplicas(); brokerId++) {
final int finalBrokerId = brokerId;
Future<Void> perBrokerFut = serviceOperator.getAsync(reconciliation.namespace(), ListenersUtils.backwardsCompatibleBrokerServiceName(reconciliation.name(), brokerId, listener)).compose(svc -> {
String brokerAddress;
if (svc.getStatus().getLoadBalancer().getIngress().get(0).getHostname() != null) {
brokerAddress = svc.getStatus().getLoadBalancer().getIngress().get(0).getHostname();
} else {
brokerAddress = svc.getStatus().getLoadBalancer().getIngress().get(0).getIp();
}
LOGGER.debugCr(reconciliation, "Found address {} for Service {}", brokerAddress, svc.getMetadata().getName());
if (ListenersUtils.skipCreateBootstrapService(listener)) {
bootstrapListenerAddressList.add(brokerAddress);
}
result.brokerDnsNames.computeIfAbsent(finalBrokerId, k -> new HashSet<>(2)).add(brokerAddress);
String advertisedHostname = ListenersUtils.brokerAdvertisedHost(listener, finalBrokerId);
if (advertisedHostname != null) {
result.brokerDnsNames.get(finalBrokerId).add(ListenersUtils.brokerAdvertisedHost(listener, finalBrokerId));
}
registerAdvertisedHostname(finalBrokerId, listener, brokerAddress);
registerAdvertisedPort(finalBrokerId, listener, listener.getPort());
return Future.succeededFuture();
});
perPodFutures.add(perBrokerFut);
}
return CompositeFuture.join(perPodFutures);
}).compose(res -> {
ListenerStatus ls = new ListenerStatusBuilder().withName(listener.getName()).withAddresses(bootstrapListenerAddressList.stream().map(listenerAddress -> new ListenerAddressBuilder().withHost(listenerAddress).withPort(listener.getPort()).build()).collect(Collectors.toList())).build();
result.listenerStatuses.add(ls);
return Future.succeededFuture();
});
listenerFutures.add(perListenerFut);
}
return CompositeFuture.join(listenerFutures).map((Void) null);
}
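The hostname-or-IP selection appears twice above, once for the bootstrap Service and once per broker. A sketch of that logic as a hypothetical helper (not part of KafkaListenersReconciler), assuming fabric8's Service model and a LoadBalancer status that hasIngressAddress() has already waited for:

import io.fabric8.kubernetes.api.model.LoadBalancerIngress;
import io.fabric8.kubernetes.api.model.Service;

// Hypothetical helper: pick the externally reachable address from the first
// LoadBalancer ingress entry of a provisioned Service.
private static String loadBalancerAddress(Service svc) {
    LoadBalancerIngress ingress = svc.getStatus().getLoadBalancer().getIngress().get(0);
    // Prefer the DNS hostname (e.g. cloud load balancers); fall back to the raw IP.
    return ingress.getHostname() != null ? ingress.getHostname() : ingress.getIp();
}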
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
The class KafkaListenersReconciler, method customListenerCertificates.
/**
* Collects the custom listener certificates from the secrets and stores them for later use
*
* @return Future which completes when all custom listener certificates are collected and are valid
*/
protected Future<Map<String, String>> customListenerCertificates() {
List<String> secretNames = kafka.getListeners().stream().filter(listener -> listener.isTls() && listener.getConfiguration() != null && listener.getConfiguration().getBrokerCertChainAndKey() != null).map(listener -> listener.getConfiguration().getBrokerCertChainAndKey().getSecretName()).distinct().collect(Collectors.toList());
LOGGER.debugCr(reconciliation, "Validating secret {} with custom TLS listener certificates", secretNames);
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> secretFutures = new ArrayList<>(secretNames.size());
Map<String, Secret> customSecrets = new HashMap<>(secretNames.size());
for (String secretName : secretNames) {
Future<Secret> fut = secretOperator.getAsync(reconciliation.namespace(), secretName).compose(secret -> {
if (secret != null) {
customSecrets.put(secretName, secret);
LOGGER.debugCr(reconciliation, "Found secrets {} with custom TLS listener certificate", secretName);
}
return Future.succeededFuture();
});
secretFutures.add(fut);
}
return CompositeFuture.join(secretFutures).compose(res -> {
List<String> errors = new ArrayList<>();
Map<String, String> customListenerCertificates = new HashMap<>();
for (GenericKafkaListener listener : kafka.getListeners()) {
if (listener.isTls() && listener.getConfiguration() != null && listener.getConfiguration().getBrokerCertChainAndKey() != null) {
CertAndKeySecretSource customCert = listener.getConfiguration().getBrokerCertChainAndKey();
Secret secret = customSecrets.get(customCert.getSecretName());
if (secret != null) {
if (!secret.getData().containsKey(customCert.getCertificate())) {
errors.add("Secret " + customCert.getSecretName() + " does not contain certificate under the key " + customCert.getCertificate() + ".");
} else if (!secret.getData().containsKey(customCert.getKey())) {
errors.add("Secret " + customCert.getSecretName() + " does not contain custom certificate private key under the key " + customCert.getKey() + ".");
} else {
byte[] publicKeyBytes = Base64.getDecoder().decode(secret.getData().get(customCert.getCertificate()));
customListenerCertificates.put(listener.getName(), new String(publicKeyBytes, StandardCharsets.US_ASCII));
result.customListenerCertificateThumbprints.put(listener.getName(), getCertificateThumbprint(secret, customCert));
}
} else {
errors.add("Secret " + customCert.getSecretName() + " with custom TLS certificate does not exist.");
}
}
}
if (errors.isEmpty()) {
return Future.succeededFuture(customListenerCertificates);
} else {
LOGGER.errorCr(reconciliation, "Failed to process Secrets with custom certificates: {}", errors);
return Future.failedFuture(new InvalidResourceException("Failed to process Secrets with custom certificates: " + errors));
}
});
}
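A self-contained sketch (a hypothetical helper, not part of the reconciler) of the per-listener checks above: both keys configured in CertAndKeySecretSource must exist in the Secret's data map, and the certificate value is base64-encoded PEM that is decoded before being stored:

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Map;

// secretData corresponds to Secret.getData(); certKey and keyKey to
// CertAndKeySecretSource.getCertificate() and getKey() above.
static String decodeCustomCertificate(Map<String, String> secretData, String certKey, String keyKey) {
    if (!secretData.containsKey(certKey)) {
        throw new IllegalArgumentException("Secret does not contain a certificate under the key " + certKey);
    }
    if (!secretData.containsKey(keyKey)) {
        throw new IllegalArgumentException("Secret does not contain a private key under the key " + keyKey);
    }
    // Kubernetes Secret data values are base64-encoded; decode back to the PEM text.
    byte[] pem = Base64.getDecoder().decode(secretData.get(certKey));
    return new String(pem, StandardCharsets.US_ASCII);
}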
Use of io.strimzi.operator.common.Reconciliation in project strimzi by strimzi.
The class KafkaListenersReconciler, method nodePortServicesReady.
/**
* Makes sure all services related to node ports are ready and collects their addresses for Statuses,
* certificates and advertised addresses. This method for all NodePort type listeners:
* 1) Checks if the bootstrap service has been provisioned (has a node port)
* 2) Collects the node port for use in CR status
* 3) Checks if the broker services have been provisioned (have a node port)
* 4) Collects the node ports for advertised hostnames
*
* @return Future which completes when all Node Port services are ready and their ports are collected
*/
protected Future<Void> nodePortServicesReady() {
List<GenericKafkaListener> nodePortListeners = ListenersUtils.nodePortListeners(kafka.getListeners());
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> listenerFutures = new ArrayList<>(nodePortListeners.size());
for (GenericKafkaListener listener : nodePortListeners) {
String bootstrapServiceName = ListenersUtils.backwardsCompatibleBootstrapServiceName(reconciliation.name(), listener);
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) Future perListenerFut = serviceOperator.hasNodePort(reconciliation, reconciliation.namespace(), bootstrapServiceName, 1_000, operationTimeoutMs).compose(res -> serviceOperator.getAsync(reconciliation.namespace(), bootstrapServiceName)).compose(svc -> {
Integer externalBootstrapNodePort = svc.getSpec().getPorts().get(0).getNodePort();
LOGGER.debugCr(reconciliation, "Found node port {} for Service {}", externalBootstrapNodePort, bootstrapServiceName);
result.bootstrapNodePorts.put(ListenersUtils.identifier(listener), externalBootstrapNodePort);
return Future.succeededFuture();
}).compose(res -> {
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> perPodFutures = new ArrayList<>(kafka.getReplicas());
for (int pod = 0; pod < kafka.getReplicas(); pod++) {
perPodFutures.add(serviceOperator.hasNodePort(reconciliation, reconciliation.namespace(), ListenersUtils.backwardsCompatibleBrokerServiceName(reconciliation.name(), pod, listener), 1_000, operationTimeoutMs));
}
return CompositeFuture.join(perPodFutures);
}).compose(res -> {
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> perPodFutures = new ArrayList<>(kafka.getReplicas());
for (int brokerId = 0; brokerId < kafka.getReplicas(); brokerId++) {
final int finalBrokerId = brokerId;
Future<Void> perBrokerFut = serviceOperator.getAsync(reconciliation.namespace(), ListenersUtils.backwardsCompatibleBrokerServiceName(reconciliation.name(), brokerId, listener)).compose(svc -> {
Integer externalBrokerNodePort = svc.getSpec().getPorts().get(0).getNodePort();
LOGGER.debugCr(reconciliation, "Found node port {} for Service {}", externalBrokerNodePort, svc.getMetadata().getName());
registerAdvertisedPort(finalBrokerId, listener, externalBrokerNodePort);
String advertisedHostname = ListenersUtils.brokerAdvertisedHost(listener, finalBrokerId);
if (advertisedHostname != null) {
result.brokerDnsNames.computeIfAbsent(finalBrokerId, k -> new HashSet<>(1)).add(advertisedHostname);
}
registerAdvertisedHostname(finalBrokerId, listener, nodePortAddressEnvVar(listener));
return Future.succeededFuture();
});
perPodFutures.add(perBrokerFut);
}
return CompositeFuture.join(perPodFutures);
}).compose(res -> {
ListenerStatus ls = new ListenerStatusBuilder().withName(listener.getName()).build();
result.listenerStatuses.add(ls);
return Future.succeededFuture();
});
listenerFutures.add(perListenerFut);
}
return CompositeFuture.join(listenerFutures).map((Void) null);
}
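A short hypothetical illustration (not Strimzi code) of what the collected ports are for: the bootstrap node port, combined with the address of any cluster node, forms the bootstrap server string that clients use, while the per-broker node ports registered above become the brokers' advertised ports.

import io.fabric8.kubernetes.api.model.Service;

// Hypothetical helper: the node port is read the same way as above; hasNodePort()
// has already waited for the allocation, so getNodePort() is non-null here.
static String bootstrapServer(Service bootstrapSvc, String nodeAddress) {
    Integer nodePort = bootstrapSvc.getSpec().getPorts().get(0).getNodePort();
    return nodeAddress + ":" + nodePort;   // e.g. "10.0.0.5:31234"
}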