Use of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the StackGres project by OnGres:
the StorageClassValidator class, validate method.
/**
 * Validates that the storage class referenced by a distributed logs resource exists.
 *
 * <p>On CREATE and UPDATE the referenced storage class is looked up; other
 * operations (e.g. DELETE) do not reference a storage class and are skipped.
 *
 * @param review the admission review carrying the resource under validation
 * @throws ValidationFailed if the referenced storage class cannot be found
 */
@Override
public void validate(StackGresDistributedLogsReview review) throws ValidationFailed {
  StackGresDistributedLogs distributedLogs = review.getRequest().getObject();
  if (distributedLogs == null) {
    // DELETE reviews carry no object; nothing to validate.
    return;
  }
  String storageClass = distributedLogs.getSpec().getPersistentVolume().getStorageClass();
  switch (review.getRequest().getOperation()) {
    case CREATE:
      checkIfStorageClassExist(storageClass,
          "Storage class " + storageClass + " not found");
      break;
    case UPDATE:
      // Fixed grammar in the user-facing message: "doesn't exists" -> "doesn't exist".
      checkIfStorageClassExist(storageClass,
          "Cannot update to storage class " + storageClass + " because it doesn't exist");
      break;
    default:
      // Other operations do not reference a storage class.
      break;
  }
}
Use of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the StackGres project by OnGres:
the Fluentd class, buildSource method.
/**
 * Builds the Fluentd ConfigMap for a distributed logs cluster, bundling the
 * rendered fluentd configuration together with the list of connected databases.
 *
 * @param context the generation context for the distributed logs resource
 * @return a ConfigMap holding the fluentd configuration and database list
 */
@NotNull
public HasMetadata buildSource(StackGresDistributedLogsContext context) {
  final StackGresDistributedLogs distributedLogs = context.getSource();

  // One database name per line, as consumed by the fluentd sidecar.
  final String connectedDatabases = context.getConnectedClusters()
      .stream()
      .map(FluentdUtil::databaseName)
      .collect(Collectors.joining("\n"));

  final Map<String, String> configMapData = ImmutableMap.of(
      "fluentd.conf", getFluentdConfig(context),
      "databases", connectedDatabases);

  return new ConfigMapBuilder()
      .withNewMetadata()
      .withNamespace(distributedLogs.getMetadata().getNamespace())
      .withName(FluentdUtil.configName(distributedLogs))
      .withLabels(labelFactory.clusterLabels(distributedLogs))
      .endMetadata()
      .withData(configMapData)
      .build();
}
Use of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the StackGres project by OnGres:
the Fluentd class, generateResource method.
/**
 * Generates the ClusterIP Service exposing the fluentd forward port of the
 * Patroni primary pod for this distributed logs cluster.
 *
 * @param context the generation context for the distributed logs resource
 * @return a stream containing the fluentd forward Service
 */
@Override
public Stream<HasMetadata> generateResource(StackGresDistributedLogsContext context) {
  final StackGresDistributedLogs distributedLogs = context.getSource();
  final Map<String, String> primaryLabels =
      labelFactory.patroniPrimaryLabels(distributedLogs);

  // Forward port: the target is named so it resolves on the pod spec.
  final ServicePort forwardPort = new ServicePortBuilder()
      .withProtocol("TCP")
      .withName(FluentdUtil.FORWARD_PORT_NAME)
      .withPort(FluentdUtil.FORWARD_PORT)
      .withTargetPort(new IntOrString(FluentdUtil.FORWARD_PORT_NAME))
      .build();

  final Service forwardService = new ServiceBuilder()
      .withNewMetadata()
      .withNamespace(distributedLogs.getMetadata().getNamespace())
      .withName(FluentdUtil.serviceName(distributedLogs))
      .withLabels(primaryLabels)
      .endMetadata()
      .withNewSpec()
      .withSelector(primaryLabels)
      .withPorts(forwardPort)
      .withType("ClusterIP")
      .endSpec()
      .build();

  return Seq.of(forwardService);
}
Use of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the StackGres project by OnGres:
the PatroniConfigEndpoints class, generateResource method.
/**
 * Generates the Patroni config Endpoints resource whose annotation carries the
 * serialized Patroni runtime configuration (TTL, loop wait, retry timeout and
 * the PostgreSQL parameters) for this distributed logs cluster.
 *
 * @param context the generation context for the distributed logs resource
 * @return a stream containing the annotated Endpoints resource
 */
@Override
public Stream<HasMetadata> generateResource(StackGresDistributedLogsContext context) {
  // Assemble the Patroni runtime configuration.
  final PatroniConfig.PostgreSql postgresqlConfig = new PatroniConfig.PostgreSql();
  postgresqlConfig.setUsePgRewind(true);
  postgresqlConfig.setParameters(getPostgresConfigValues(context));

  final PatroniConfig patroniConfig = new PatroniConfig();
  patroniConfig.setTtl(30);
  patroniConfig.setLoopWait(10);
  patroniConfig.setRetryTimeout(10);
  patroniConfig.setPostgresql(postgresqlConfig);

  final String serializedConfig = objectMapper.valueToTree(patroniConfig).toString();

  final StackGresDistributedLogs distributedLogs = context.getSource();
  final Endpoints configEndpoints = new EndpointsBuilder()
      .withNewMetadata()
      .withNamespace(distributedLogs.getMetadata().getNamespace())
      .withName(configName(context))
      .withLabels(labelFactory.patroniClusterLabels(distributedLogs))
      .withAnnotations(ImmutableMap.of(PATRONI_CONFIG_KEY, serializedConfig))
      .endMetadata()
      .build();

  return Stream.of(configEndpoints);
}
Use of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the StackGres project by OnGres:
the PatroniConfigMap class, buildSource method.
/**
 * Builds the Patroni environment ConfigMap for a distributed logs cluster.
 *
 * <p>The map carries the PATRONI_* environment variables consumed by the
 * Patroni container (Kubernetes DCS scope and labels, listen addresses,
 * data/bin/socket paths), plus an MD5 checksum entry added by
 * {@code StackGresUtil.addMd5Sum} so pods restart when the content changes.
 *
 * @param context the generation context for the distributed logs resource
 * @return the ConfigMap with the Patroni environment
 */
@NotNull
public HasMetadata buildSource(StackGresDistributedLogsContext context) {
  final StackGresDistributedLogs cluster = context.getSource();
  final String pgVersion = StackGresDistributedLogsUtil.getPostgresVersion();
  final Map<String, String> patroniClusterLabels = labelFactory.patroniClusterLabels(cluster);

  // Patroni expects its Kubernetes labels as a serialized JSON object.
  final String labelsAsJson;
  try {
    labelsAsJson = objectMapper.writeValueAsString(patroniClusterLabels);
  } catch (JsonProcessingException ex) {
    throw new RuntimeException(ex);
  }

  final String listenHost = "0.0.0.0";
  final int pgRawPort = EnvoyUtil.PG_PORT;
  final int pgPort = EnvoyUtil.PG_PORT;

  final Map<String, String> environment = new HashMap<>();
  environment.put("PATRONI_SCOPE", labelFactory.clusterScope(cluster));
  environment.put("PATRONI_KUBERNETES_SCOPE_LABEL", labelFactory.labelMapper().clusterScopeKey());
  environment.put("PATRONI_KUBERNETES_LABELS", labelsAsJson);
  environment.put("PATRONI_KUBERNETES_USE_ENDPOINTS", "true");
  environment.put("PATRONI_KUBERNETES_PORTS", getKubernetesPorts(pgPort, pgRawPort));
  environment.put("PATRONI_SUPERUSER_USERNAME", "postgres");
  environment.put("PATRONI_REPLICATION_USERNAME", "replicator");
  environment.put("PATRONI_POSTGRESQL_LISTEN", listenHost + ":" + EnvoyUtil.PG_PORT);
  environment.put("PATRONI_POSTGRESQL_CONNECT_ADDRESS",
      "${PATRONI_KUBERNETES_POD_IP}:" + pgRawPort);
  environment.put("PATRONI_RESTAPI_LISTEN", "0.0.0.0:8008");
  environment.put("PATRONI_POSTGRESQL_DATA_DIR", PatroniEnvPaths.PG_DATA_PATH.getPath());
  environment.put("PATRONI_POSTGRESQL_BIN_DIR", "/usr/lib/postgresql/" + pgVersion + "/bin");
  environment.put("PATRONI_POSTGRES_UNIX_SOCKET_DIRECTORY", PatroniEnvPaths.PG_RUN_PATH.getPath());
  // Bump Patroni verbosity only when the dedicated Patroni logger is at trace.
  if (PATRONI_LOGGER.isTraceEnabled()) {
    environment.put("PATRONI_LOG_LEVEL", "DEBUG");
  }
  environment.put("PATRONI_SCRIPTS", "1");

  return new ConfigMapBuilder()
      .withNewMetadata()
      .withNamespace(cluster.getMetadata().getNamespace())
      .withName(name(context))
      .withLabels(patroniClusterLabels)
      .endMetadata()
      .withData(StackGresUtil.addMd5Sum(environment))
      .build();
}
Aggregations