Usage of io.strimzi.operator.cluster.operator.resource.KafkaRoller in the project strimzi by strimzi.
Shown below: the class CaReconciler, method rollingUpdateForNewCaKey.
/**
 * Perform a rolling update of the cluster so that CA certificates get added to their truststores, or expired CA
 * certificates get removed from their truststores. Note this is only necessary when the CA certificate has changed
 * due to a new CA key. It is not necessary when the CA certificate is replaced while retaining the existing key.
 *
 * @return  Future which completes when the rolling update (if any was needed) is finished
 */
Future<Void> rollingUpdateForNewCaKey() {
    // Collect the reasons for restarting pods; an empty list means no rolling update is needed at all
    List<String> restartReasons = new ArrayList<>(2);
    if (clusterCa.keyReplaced()) {
        restartReasons.add("trust new cluster CA certificate signed by new key");
    }
    if (clientsCa.keyReplaced()) {
        restartReasons.add("trust new clients CA certificate signed by new key");
    }

    if (restartReasons.isEmpty()) {
        // Neither CA key was replaced => nothing has to be rolled
        return Future.succeededFuture();
    }

    // Logs the restart reasons for a given pod and passes them on to the rollers
    Function<Pod, List<String>> logAndReturnReasons = pod -> {
        LOGGER.debugCr(reconciliation, "Rolling Pod {} to {}", pod.getMetadata().getName(), restartReasons);
        return restartReasons;
    };

    Future<Void> zooKeeperRollFuture;
    if (clusterCa.keyReplaced()) {
        // ZooKeeper is rolled only for a new Cluster CA key
        Labels zkSelectorLabels = Labels.EMPTY
                .withStrimziKind(reconciliation.kind())
                .withStrimziCluster(reconciliation.name())
                .withStrimziName(KafkaResources.zookeeperStatefulSetName(reconciliation.name()));
        zooKeeperRollFuture = new ZooKeeperRoller(podOperator, zookeeperLeaderFinder, operationTimeoutMs)
                .maybeRollingUpdate(reconciliation, zkSelectorLabels, logAndReturnReasons, clusterCa.caCertSecret(), oldCoSecret);
    } else {
        zooKeeperRollFuture = Future.succeededFuture();
    }

    return zooKeeperRollFuture
            .compose(i -> {
                // Determine the Kafka pod names, either from the StrimziPodSet or from the StatefulSet
                if (featureGates.useStrimziPodSetsEnabled()) {
                    return strimziPodSetOperator
                            .getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()))
                            .compose(podSet -> podSet != null
                                    ? Future.succeededFuture(KafkaCluster.generatePodList(reconciliation.name(), podSet.getSpec().getPods().size()))
                                    : Future.succeededFuture(List.<String>of()));
                } else {
                    return stsOperator
                            .getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()))
                            .compose(sts -> sts != null
                                    ? Future.succeededFuture(KafkaCluster.generatePodList(reconciliation.name(), sts.getSpec().getReplicas()))
                                    : Future.succeededFuture(List.<String>of()));
                }
            })
            .compose(podNames -> new KafkaRoller(reconciliation, vertx, podOperator, 1_000, operationTimeoutMs,
                        () -> new BackOff(250, 2, 10), podNames, clusterCa.caCertSecret(), oldCoSecret,
                        adminClientProvider, brokerId -> null, null, null, false)
                    .rollingRestart(logAndReturnReasons))
            .compose(i -> {
                if (clusterCa.keyReplaced()) {
                    // EO, KE and CC need to be rolled only for a new Cluster CA key
                    return rollDeploymentIfExists(KafkaResources.entityOperatorDeploymentName(reconciliation.name()), restartReasons.toString())
                            .compose(i2 -> rollDeploymentIfExists(KafkaExporterResources.deploymentName(reconciliation.name()), restartReasons.toString()))
                            .compose(i2 -> rollDeploymentIfExists(CruiseControlResources.deploymentName(reconciliation.name()), restartReasons.toString()));
                } else {
                    return Future.succeededFuture();
                }
            });
}
Usage of io.strimzi.operator.cluster.operator.resource.KafkaRoller in the project strimzi-kafka-operator by strimzi.
Shown below: the class KafkaReconciler, method perBrokerKafkaConfiguration.
/**
 * Generates and creates the ConfigMaps with per-broker configuration for Kafka brokers used in PodSets. It will
 * also delete the ConfigMaps for any scaled-down brokers (scale down is done before this is called in the
 * reconciliation)
 *
 * @param metricsAndLogging Metrics and Logging configuration
 *
 * @return Future which completes when the Kafka Configuration is prepared
 */
protected Future<Void> perBrokerKafkaConfiguration(MetricsAndLogging metricsAndLogging) {
    return configMapOperator.listAsync(reconciliation.namespace(), kafka.getSelectorLabels())
            .compose(existingConfigMaps -> {
                // This is used during Kafka rolling updates -> we have to store it for later
                this.logging = kafka.loggingConfiguration(kafka.getLogging(), metricsAndLogging.getLoggingCm());
                this.loggingHash = Util.hashStub(Util.getLoggingDynamicallyUnmodifiableEntries(logging));

                List<ConfigMap> desiredConfigMaps = kafka.generatePerBrokerConfigurationConfigMaps(metricsAndLogging, listenerReconciliationResults.advertisedHostnames, listenerReconciliationResults.advertisedPorts, featureGates.controlPlaneListenerEnabled());

                // Raw type has to be used because of the CompositeFuture
                @SuppressWarnings({ "rawtypes" })
                List<Future> futures = new ArrayList<>(existingConfigMaps.size() + kafka.getReplicas());

                // Names of the ConfigMaps which must survive: the shared ConfigMap plus every desired per-broker one
                List<String> retainedNames = new ArrayList<>(desiredConfigMaps.size() + 1);
                retainedNames.add(kafka.getAncillaryConfigMapName());
                for (ConfigMap desired : desiredConfigMaps) {
                    retainedNames.add(desired.getMetadata().getName());
                }

                // Delete every existing ConfigMap which is not on the retained list (e.g. after a scale-down)
                existingConfigMaps.stream()
                        .map(existing -> existing.getMetadata().getName())
                        .filter(name -> !retainedNames.contains(name))
                        .forEach(name -> futures.add(configMapOperator.deleteAsync(reconciliation, reconciliation.namespace(), name, true)));

                // Create / update the desired config maps
                for (ConfigMap cm : desiredConfigMaps) {
                    String cmName = cm.getMetadata().getName();
                    int brokerId = getPodIndexFromPodName(cmName);

                    // The advertised hostname and port might change. If they change, we need to roll the pods.
                    // Here we collect their hash to trigger the rolling update. For per-broker configuration,
                    // we need just the advertised hostnames / ports for the given broker.
                    StringBuilder hashInput = new StringBuilder();
                    hashInput.append(listenerReconciliationResults.advertisedHostnames.get(brokerId).entrySet().stream().map(kv -> kv.getKey() + "://" + kv.getValue()).sorted().collect(Collectors.joining(" ")));
                    hashInput.append(listenerReconciliationResults.advertisedPorts.get(brokerId).entrySet().stream().map(kv -> kv.getKey() + "://" + kv.getValue()).sorted().collect(Collectors.joining(" ")));
                    hashInput.append(cm.getData().getOrDefault(KafkaCluster.BROKER_LISTENERS_FILENAME, ""));

                    // Changes to regular Kafka configuration are handled through the KafkaRoller which decides whether to roll the pod or not
                    // In addition to that, we have to handle changes to configuration unknown to Kafka -> different plugins (Authorization, Quotas etc.)
                    // This is captured here with the unknown configurations and the hash is used to roll the pod when it changes
                    KafkaConfiguration kc = KafkaConfiguration.unvalidated(reconciliation, cm.getData().getOrDefault(KafkaCluster.BROKER_CONFIGURATION_FILENAME, ""));

                    // We store hash of the broker configurations for later use in Pod and in rolling updates
                    this.brokerConfigurationHash.put(brokerId, Util.hashStub(hashInput.toString() + kc.unknownConfigsWithValues(kafka.getKafkaVersion()).toString()));

                    futures.add(configMapOperator.reconcile(reconciliation, reconciliation.namespace(), cmName, cm));
                }

                return CompositeFuture.join(futures).map((Void) null);
            });
}
Usage of io.strimzi.operator.cluster.operator.resource.KafkaRoller in the project strimzi by strimzi.
Shown below: the class KafkaReconciler, method perBrokerKafkaConfiguration.
/**
 * Generates and creates the ConfigMaps with per-broker configuration for Kafka brokers used in PodSets. It will
 * also delete the ConfigMaps for any scaled-down brokers (scale down is done before this is called in the
 * reconciliation)
 *
 * @param metricsAndLogging Metrics and Logging configuration
 *
 * @return Future which completes when the Kafka Configuration is prepared
 */
protected Future<Void> perBrokerKafkaConfiguration(MetricsAndLogging metricsAndLogging) {
// existingConfigMaps are all ConfigMaps matching the Kafka selector labels, which may include
// left-over ConfigMaps of brokers that were already scaled down
return configMapOperator.listAsync(reconciliation.namespace(), kafka.getSelectorLabels()).compose(existingConfigMaps -> {
// This is used during Kafka rolling updates -> we have to store it for later
this.logging = kafka.loggingConfiguration(kafka.getLogging(), metricsAndLogging.getLoggingCm());
this.loggingHash = Util.hashStub(Util.getLoggingDynamicallyUnmodifiableEntries(logging));
List<ConfigMap> desiredConfigMaps = kafka.generatePerBrokerConfigurationConfigMaps(metricsAndLogging, listenerReconciliationResults.advertisedHostnames, listenerReconciliationResults.advertisedPorts, featureGates.controlPlaneListenerEnabled());
// Has to use Raw type because of the CompositeFuture
@SuppressWarnings({ "rawtypes" }) List<Future> ops = new ArrayList<>(existingConfigMaps.size() + kafka.getReplicas());
// Delete all existing ConfigMaps which are not desired and are not the shared config map
List<String> desiredNames = new ArrayList<>(desiredConfigMaps.size() + 1);
// We do not want to delete the shared ConfigMap, so we add it here
desiredNames.add(kafka.getAncillaryConfigMapName());
desiredNames.addAll(desiredConfigMaps.stream().map(cm -> cm.getMetadata().getName()).collect(Collectors.toList()));
for (ConfigMap cm : existingConfigMaps) {
// We delete the cms not on the desired names list
if (!desiredNames.contains(cm.getMetadata().getName())) {
ops.add(configMapOperator.deleteAsync(reconciliation, reconciliation.namespace(), cm.getMetadata().getName(), true));
}
}
// Create / update the desired config maps
for (ConfigMap cm : desiredConfigMaps) {
String cmName = cm.getMetadata().getName();
// The broker ID is derived from the ConfigMap name (see getPodIndexFromPodName)
int brokerId = getPodIndexFromPodName(cmName);
// The advertised hostname and port might change. If they change, we need to roll the pods.
// Here we collect their hash to trigger the rolling update. For per-broker configuration,
// we need just the advertised hostnames / ports for given broker.
String brokerConfiguration = listenerReconciliationResults.advertisedHostnames.get(brokerId).entrySet().stream().map(kv -> kv.getKey() + "://" + kv.getValue()).sorted().collect(Collectors.joining(" "));
brokerConfiguration += listenerReconciliationResults.advertisedPorts.get(brokerId).entrySet().stream().map(kv -> kv.getKey() + "://" + kv.getValue()).sorted().collect(Collectors.joining(" "));
brokerConfiguration += cm.getData().getOrDefault(KafkaCluster.BROKER_LISTENERS_FILENAME, "");
// Changes to regular Kafka configuration are handled through the KafkaRoller which decides whether to roll the pod or not
// In addition to that, we have to handle changes to configuration unknown to Kafka -> different plugins (Authorization, Quotas etc.)
// This is captured here with the unknown configurations and the hash is used to roll the pod when it changes
KafkaConfiguration kc = KafkaConfiguration.unvalidated(reconciliation, cm.getData().getOrDefault(KafkaCluster.BROKER_CONFIGURATION_FILENAME, ""));
// We store hash of the broker configurations for later use in Pod and in rolling updates
this.brokerConfigurationHash.put(brokerId, Util.hashStub(brokerConfiguration + kc.unknownConfigsWithValues(kafka.getKafkaVersion()).toString()));
ops.add(configMapOperator.reconcile(reconciliation, reconciliation.namespace(), cmName, cm));
}
// Wait for all deletions and reconciliations, then discard the individual results
return CompositeFuture.join(ops).map((Void) null);
});
}
Usage of io.strimzi.operator.cluster.operator.resource.KafkaRoller in the project strimzi-kafka-operator by strimzi.
Shown below: the class CaReconciler, method rollingUpdateForNewCaKey.
/**
 * Perform a rolling update of the cluster so that CA certificates get added to their truststores, or expired CA
 * certificates get removed from their truststores. Note this is only necessary when the CA certificate has changed
 * due to a new CA key. It is not necessary when the CA certificate is replaced while retaining the existing key.
 *
 * @return  Future which completes when the rolling update (if any was needed) is finished
 */
Future<Void> rollingUpdateForNewCaKey() {
// Reasons for restarting the pods; an empty list means no rolling update is needed
List<String> reason = new ArrayList<>(2);
if (clusterCa.keyReplaced()) {
reason.add("trust new cluster CA certificate signed by new key");
}
if (clientsCa.keyReplaced()) {
reason.add("trust new clients CA certificate signed by new key");
}
if (!reason.isEmpty()) {
Future<Void> zkRollFuture;
// Logs the restart reasons for a given pod and passes them on to the rollers
Function<Pod, List<String>> rollPodAndLogReason = pod -> {
LOGGER.debugCr(reconciliation, "Rolling Pod {} to {}", pod.getMetadata().getName(), reason);
return reason;
};
if (clusterCa.keyReplaced()) {
// ZooKeeper is rolled only for new Cluster CA key
Labels zkSelectorLabels = Labels.EMPTY.withStrimziKind(reconciliation.kind()).withStrimziCluster(reconciliation.name()).withStrimziName(KafkaResources.zookeeperStatefulSetName(reconciliation.name()));
zkRollFuture = new ZooKeeperRoller(podOperator, zookeeperLeaderFinder, operationTimeoutMs).maybeRollingUpdate(reconciliation, zkSelectorLabels, rollPodAndLogReason, clusterCa.caCertSecret(), oldCoSecret);
} else {
zkRollFuture = Future.succeededFuture();
}
// After (possibly) rolling ZooKeeper, determine the Kafka pod names and roll them through the KafkaRoller
return zkRollFuture.compose(i -> {
// The pod names come either from the StrimziPodSet or from the StatefulSet, depending on the feature gate
if (featureGates.useStrimziPodSetsEnabled()) {
return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name())).compose(podSet -> {
if (podSet != null) {
return Future.succeededFuture(KafkaCluster.generatePodList(reconciliation.name(), podSet.getSpec().getPods().size()));
} else {
// No PodSet found => no Kafka pods to roll
return Future.succeededFuture(List.<String>of());
}
});
} else {
return stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name())).compose(sts -> {
if (sts != null) {
return Future.succeededFuture(KafkaCluster.generatePodList(reconciliation.name(), sts.getSpec().getReplicas()));
} else {
// No StatefulSet found => no Kafka pods to roll
return Future.succeededFuture(List.<String>of());
}
});
}
}).compose(replicas -> new KafkaRoller(reconciliation, vertx, podOperator, 1_000, operationTimeoutMs, () -> new BackOff(250, 2, 10), replicas, clusterCa.caCertSecret(), oldCoSecret, adminClientProvider, brokerId -> null, null, null, false).rollingRestart(rollPodAndLogReason)).compose(i -> {
if (clusterCa.keyReplaced()) {
// EO, KE and CC need to be rolled only for new Cluster CA key.
return rollDeploymentIfExists(KafkaResources.entityOperatorDeploymentName(reconciliation.name()), reason.toString()).compose(i2 -> rollDeploymentIfExists(KafkaExporterResources.deploymentName(reconciliation.name()), reason.toString())).compose(i2 -> rollDeploymentIfExists(CruiseControlResources.deploymentName(reconciliation.name()), reason.toString()));
} else {
return Future.succeededFuture();
}
});
} else {
// Neither CA key was replaced => nothing has to be rolled
return Future.succeededFuture();
}
}
Aggregations