Use of io.stackgres.common.distributedlogs.Tables in the stackgres project by OnGres.
Shown below: the reconcile method of the DistributedLogsClusterReconciliator class.
@SuppressFBWarnings(value = "REC_CATCH_EXCEPTION",
    justification = "False positives")
protected ReconciliationResult<Boolean> reconcile(KubernetesClient client,
    StackGresDistributedLogsContext context) throws Exception {
  StackGresDistributedLogs distributedLogs = context.getDistributedLogs();
  if (distributedLogs.getStatus() == null || !isPatroniReady(context)) {
    LOGGER.warn("Waiting for distributedlogs cluster to become ready...");
    return new ReconciliationResult<>(false);
  }
  final ImmutableList.Builder<Exception> exceptions = ImmutableList.builder();
  boolean statusUpdated = false;
  for (StackGresDistributedLogsStatusCluster cluster
      : distributedLogs.getStatus().getConnectedClusters()) {
    String database = FluentdUtil.databaseName(cluster.getNamespace(), cluster.getName());
    try {
      if (!databaseManager.existsDatabase(context, database)) {
        LOGGER.info("Creating database {}", database);
        databaseManager.createDatabase(context, database);
      }
    } catch (Exception ex) {
      exceptions.add(ex);
      handleException(client, distributedLogs, cluster, ex);
      continue;
    }
    String retention = cluster.getConfig().getRetention();
    if (!Optional.ofNullable(distributedLogs.getStatus().getDatabases())
        .flatMap(databases -> databases.stream()
            .filter(databaseStatus -> databaseStatus.getName().equals(database))
            .findAny())
        .map(StackGresDistributedLogsStatusDatabase::getRetention)
        .map(currentRetention -> Objects.equals(retention, currentRetention))
        .orElse(false)) {
      for (String table : Seq.of(Tables.values()).map(Tables::getTableName)) {
        LOGGER.info("Updating retention window for database {} and table {} to {}",
            database, table, retention);
        try {
          databaseManager.updateRetention(context, database, retention, table);
        } catch (Exception ex) {
          exceptions.add(ex);
          handleException(client, distributedLogs, cluster, ex);
          continue;
        }
      }
    }
    if (retention != null) {
      for (String table : Seq.of(Tables.values()).map(Tables::getTableName)) {
        try {
          databaseManager.reconcileRetention(context, database, retention, table)
              .stream()
              .forEach(output -> LOGGER.info(
                  "Reconcile retention for database {} and table {}: {}",
                  database, table, output));
        } catch (Exception ex) {
          exceptions.add(ex);
          handleException(client, distributedLogs, cluster, ex);
          continue;
        }
      }
    }
    statusUpdated = statusUpdated || updateStatus(distributedLogs, database, retention);
  }
  String fluentdConfigHash = configManager.getFluentdConfigHash();
  if (!Objects.equals(distributedLogs.getStatus().getFluentdConfigHash(), fluentdConfigHash)) {
    LOGGER.info("Reloading fluentd configuration");
    configManager.reloadFluentdConfiguration();
    distributedLogs.getStatus().setFluentdConfigHash(fluentdConfigHash);
  }
  return new ReconciliationResult<>(statusUpdated, exceptions.build());
}
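
In this method, Tables supplies the names of the distributed-logs tables (via Tables.values() and Tables::getTableName) that the reconciler walks when applying retention. Below is a minimal, self-contained sketch of that iteration pattern; the enum constants, table names, database name, and retention value in it are illustrative assumptions, not the actual contents of io.stackgres.common.distributedlogs.Tables.

import org.jooq.lambda.Seq;

public class TablesUsageSketch {

  // Hypothetical stand-in for io.stackgres.common.distributedlogs.Tables:
  // an enum whose constants expose the underlying table name.
  enum Tables {
    LOG_POSTGRES("log_postgres"),
    LOG_PATRONI("log_patroni");

    private final String tableName;

    Tables(String tableName) {
      this.tableName = tableName;
    }

    public String getTableName() {
      return tableName;
    }
  }

  public static void main(String[] args) {
    String database = "example_namespace_example_cluster"; // illustrative database name
    String retention = "7 days";                           // illustrative retention window
    // Same iteration pattern as the reconcile method above:
    // Seq (jOOλ) is Iterable, so it can drive a for-each loop directly.
    for (String table : Seq.of(Tables.values()).map(Tables::getTableName)) {
      System.out.printf("would update retention of %s.%s to %s%n", database, table, retention);
    }
  }
}

The reconcile method runs this loop twice per connected cluster: once to apply a changed retention window via updateRetention, and once (whenever a retention is configured) to enforce it via reconcileRetention.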