Use of io.strimzi.operator.common.model.OrderedProperties in the strimzi-kafka-operator project by strimzi.
The class AbstractConnectOperator, method reconcileConnectors.
/**
 * Reconciles all the connectors selected by the given Connect instance, updating each connector's status with the result.
 *
 * @param reconciliation The reconciliation
 * @param connect        The KafkaConnect resource whose selected KafkaConnector resources should be reconciled
 * @param connectStatus  Status of the KafkaConnect resource (will be used to set the available
 *                       connector plugins)
 * @param scaledToZero   Indicates whether the related Connect cluster is currently scaled to 0 replicas
 * @param desiredLogging Desired logger configuration pushed to the Connect REST API
 * @param defaultLogging Default logger configuration (NOTE(review): presumably the fallback used when resolving
 *                       desiredLogging — confirm against updateConnectLoggers)
 * @return A future, failed if any of the connectors' statuses could not be updated.
 */
protected Future<Void> reconcileConnectors(Reconciliation reconciliation, T connect, S connectStatus, boolean scaledToZero, String desiredLogging, OrderedProperties defaultLogging) {
    String connectName = connect.getMetadata().getName();
    String namespace = connect.getMetadata().getNamespace();
    // Hostname of the Connect REST API service for this cluster
    String host = KafkaConnectResources.qualifiedServiceName(connectName, namespace);

    // Nothing to reconcile unless this Connect instance is configured to use KafkaConnector resources
    if (!isUseResources(connect)) {
        return Future.succeededFuture();
    }

    // With 0 replicas there is no REST API to call: just record the "zero replicas" error status
    // on every KafkaConnector labelled as belonging to this cluster.
    if (scaledToZero) {
        return connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                .compose(connectors -> CompositeFuture.join(connectors.stream()
                        .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, zeroReplicas(namespace, connectName)))
                        .collect(Collectors.toList())))
                .map((Void) null);
    }

    KafkaConnectApi apiClient = connectClientProvider.apply(vertx);

    // In parallel: list the connectors running in Connect, list the desired KafkaConnector resources,
    // fetch the available connector plugins, and update the Connect logger levels.
    return CompositeFuture.join(
            apiClient.list(host, port),
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
            apiClient.listConnectorPlugins(reconciliation, host, port),
            apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging)).compose(cf -> {
        List<String> runningConnectorNames = cf.resultAt(0);
        List<KafkaConnector> desiredConnectors = cf.resultAt(1);
        List<ConnectorPlugin> connectorPlugins = cf.resultAt(2);

        LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status");
        connectStatus.setConnectorPlugins(connectorPlugins);

        // Connectors running in Connect without a matching KafkaConnector resource are deleted
        // (set difference: running names minus desired names)
        Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
        deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
        LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames);
        Stream<Future<Void>> deletionFutures = deleteConnectorNames.stream().map(connectorName -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null));

        // Every desired KafkaConnector resource is created or updated in Connect
        LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors);
        Stream<Future<Void>> createUpdateFutures = desiredConnectors.stream()
                .map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));

        return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null);
    }).recover(error -> {
        if (error instanceof ConnectTimeoutException) {
            Promise<Void> connectorStatuses = Promise.promise();
            LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status");

            // Record the timeout error in each connector's status, then fail the overall future
            // with the original error regardless of how the status updates went.
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                    .compose(connectors -> CompositeFuture.join(connectors.stream()
                            .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, error))
                            .collect(Collectors.toList())))
                    .onComplete(ignore -> connectorStatuses.fail(error));

            return connectorStatuses.future();
        } else {
            return Future.failedFuture(error);
        }
    });
}
Use of io.strimzi.operator.common.model.OrderedProperties in the strimzi-kafka-operator project by strimzi.
The class KafkaBrokerConfigurationDiff, method diff.
/**
 * Computes the diff between the current broker configuration and the desired one.
 * Entries in IGNORABLE_PROPERTIES are skipped.
 *
 * @param brokerId id of compared broker
 * @param desired desired configuration, may be null if the related ConfigMap does not exist yet or no changes are required
 * @param brokerConfigs current configuration
 * @param configModel default configuration for {@code kafkaVersion} of broker
 * @return Collection of AlterConfigOp containing all entries which were changed from current in desired configuration
 */
private Collection<AlterConfigOp> diff(int brokerId, String desired, Config brokerConfigs, Map<String, ConfigModel> configModel) {
    // No current or no desired configuration => nothing to compare
    if (brokerConfigs == null || desired == null) {
        return Collections.emptyList();
    }

    // Snapshot the current broker config as a String->String map; a null value is
    // represented by the literal string "null" so that JSON diffing can handle it.
    Map<String, String> currentConfig = brokerConfigs.entries().stream()
            .collect(Collectors.toMap(ConfigEntry::name, entry -> entry.value() == null ? "null" : entry.value()));

    // Parse the desired key=value pairs and resolve broker-id placeholders
    OrderedProperties desiredProperties = new OrderedProperties();
    desiredProperties.addStringPairs(desired);
    Map<String, String> desiredConfig = desiredProperties.asMap();
    fillPlaceholderValue(desiredConfig, Integer.toString(brokerId));

    // Serialize both maps with stable key ordering and compute a JSON patch between them
    JsonNode source = patchMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true).valueToTree(currentConfig);
    JsonNode target = patchMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true).valueToTree(desiredConfig);
    JsonNode jsonDiff = JsonDiff.asJson(source, target);

    Collection<AlterConfigOp> changes = new ArrayList<>();
    for (JsonNode diffEntry : jsonDiff) {
        // JSON patch paths look like "/property.name" — strip the leading slash
        String path = diffEntry.get("path").asText();
        String propertyName = path.substring(1);
        String op = diffEntry.get("op").asText();

        Optional<ConfigEntry> currentEntry = brokerConfigs.entries().stream()
                .filter(entry -> entry.name().equals(propertyName))
                .findFirst();

        if (currentEntry.isPresent()) {
            if ("remove".equals(op)) {
                removeProperty(configModel, changes, propertyName, currentEntry.get());
            } else if ("replace".equals(op)) {
                // entry is in the current, desired is updated value
                updateOrAdd(currentEntry.get().name(), configModel, desiredConfig, changes);
            }
        } else if ("add".equals(op)) {
            // entry is not in the current, it is added
            updateOrAdd(propertyName, configModel, desiredConfig, changes);
        }

        if ("remove".equals(op)) {
            // there is a lot of properties set by default - not having them in desired causes very noisy log output
            LOGGER.traceCr(reconciliation, "Kafka Broker {} Config Differs : {}", brokerId, diffEntry);
            LOGGER.traceCr(reconciliation, "Current Kafka Broker Config path {} has value {}", propertyName, lookupPath(source, path));
            LOGGER.traceCr(reconciliation, "Desired Kafka Broker Config path {} has value {}", propertyName, lookupPath(target, path));
        } else {
            LOGGER.debugCr(reconciliation, "Kafka Broker {} Config Differs : {}", brokerId, diffEntry);
            LOGGER.debugCr(reconciliation, "Current Kafka Broker Config path {} has value {}", propertyName, lookupPath(source, path));
            LOGGER.debugCr(reconciliation, "Desired Kafka Broker Config path {} has value {}", propertyName, lookupPath(target, path));
        }
    }
    return changes;
}
Use of io.strimzi.operator.common.model.OrderedProperties in the strimzi-kafka-operator project by strimzi.
The class AbstractModel, method maybeAddMonitorIntervalToExternalLogging.
/**
 * Adds 'monitorInterval=30' to external logging ConfigMap. If the ConfigMap already sets this key,
 * the user-provided value is kept untouched.
 *
 * @param data String with log4j2 properties in format key=value separated by new lines
 * @return log4j2 configuration with monitorInterval property
 */
protected String maybeAddMonitorIntervalToExternalLogging(String data) {
    OrderedProperties orderedProperties = new OrderedProperties();
    orderedProperties.addStringPairs(data);

    // Plain key lookup instead of the original per-key regex match ("^monitorInterval$"),
    // which was just an exact-equality check done the expensive way.
    if (orderedProperties.asMap().containsKey("monitorInterval")) {
        // do not override custom value
        return data;
    } else {
        return data + "\nmonitorInterval=" + LOG4J2_MONITOR_INTERVAL + "\n";
    }
}
Use of io.strimzi.operator.common.model.OrderedProperties in the strimzi-kafka-operator project by strimzi.
The class TestConfigurationWithoutDefaults, method testNonEmptyJson.
// Verifies that a non-empty JSON configuration is converted to the expected ordered properties.
@ParallelTest
public void testNonEmptyJson() {
    JsonObject input = new JsonObject()
            .put("var1", "aaa")
            .put("var2", "bbb")
            .put("var3", "ccc");
    OrderedProperties expected = createWithDefaults("var3", "ccc", "var2", "bbb", "var1", "aaa");

    AbstractConfiguration parsed = new TestConfiguration(input);
    assertThat(parsed.asOrderedProperties(), is(expected));
}
Use of io.strimzi.operator.common.model.OrderedProperties in the strimzi-kafka-operator project by strimzi.
The class TestConfigurationWithoutDefaults, method testJsonWithForbiddenKeys.
// Verifies that keys on the forbidden list are dropped while regular keys survive the conversion.
@ParallelTest
public void testJsonWithForbiddenKeys() {
    JsonObject input = new JsonObject()
            .put("var1", "aaa")
            .put("var2", "bbb")
            .put("var3", "ccc")
            .put("forbidden.option", "ddd");
    OrderedProperties expected = createWithDefaults("var3", "ccc", "var2", "bbb", "var1", "aaa");

    AbstractConfiguration parsed = new TestConfiguration(input);
    assertThat(parsed.asOrderedProperties(), is(expected));
}
Aggregations