Use of io.stackgres.apiweb.dto.cluster.ClusterSpec in project stackgres by ongres.
The class ClusterResourceMockedTest, method checkDto. It verifies that the fields of the StackGresCluster custom resource are correctly mirrored in the ClusterDto returned by the REST API.
@Override
protected void checkDto(ClusterDto dto, StackGresCluster resource) {
if (resource.getMetadata() != null) {
assertNotNull(dto.getMetadata());
assertEquals(resource.getMetadata().getNamespace(), dto.getMetadata().getNamespace());
assertEquals(resource.getMetadata().getName(), dto.getMetadata().getName());
assertEquals(resource.getMetadata().getUid(), dto.getMetadata().getUid());
} else {
assertNull(dto.getMetadata());
}
final StackGresClusterSpec resourceSpec = resource.getSpec();
final ClusterSpec dtoSpec = dto.getSpec();
if (resourceSpec != null) {
assertNotNull(dtoSpec);
assertEquals(resourceSpec.getInstances(), dtoSpec.getInstances());
assertEquals(resourceSpec.getPostgres().getVersion(), dtoSpec.getPostgres().getVersion());
assertEquals(resourceSpec.getPrometheusAutobind(), dtoSpec.getPrometheusAutobind());
assertEquals(resourceSpec.getResourceProfile(), dtoSpec.getSgInstanceProfile());
final ClusterConfiguration dtoClusterConfigurations = dtoSpec.getConfigurations();
final StackGresClusterConfiguration resourceClusterConfiguration = resourceSpec.getConfiguration();
if (resourceClusterConfiguration != null) {
assertNotNull(dtoClusterConfigurations);
assertEquals(resourceClusterConfiguration.getBackupConfig(), dtoClusterConfigurations.getSgBackupConfig());
assertEquals(resourceClusterConfiguration.getConnectionPoolingConfig(), dtoClusterConfigurations.getSgPoolingConfig());
assertEquals(resourceClusterConfiguration.getPostgresConfig(), dtoClusterConfigurations.getSgPostgresConfig());
} else {
assertNull(dtoClusterConfigurations);
}
final ClusterPod dtoSpecPods = dtoSpec.getPods();
final StackGresClusterPod resourcePod = resourceSpec.getPod();
if (resourcePod != null) {
assertNotNull(dtoSpecPods);
assertEquals(resourcePod.getDisableConnectionPooling(), dtoSpecPods.getDisableConnectionPooling());
assertEquals(resourcePod.getDisableMetricsExporter(), dtoSpecPods.getDisableMetricsExporter());
assertEquals(resourcePod.getDisablePostgresUtil(), dtoSpecPods.getDisablePostgresUtil());
final ClusterPodPersistentVolume dtoPV = dtoSpecPods.getPersistentVolume();
final StackGresPodPersistentVolume resourcePV = resourcePod.getPersistentVolume();
if (resourcePV != null) {
assertNotNull(dtoPV);
assertEquals(resourcePV.getSize(), dtoPV.getSize());
assertEquals(resourcePV.getStorageClass(), dtoPV.getStorageClass());
} else {
assertNull(dtoPV);
}
if (resourceSpec.getMetadata() != null) {
assertNotNull(dtoSpec.getMetadata());
assertEquals(resourceSpec.getMetadata().getLabels().getClusterPods(), dtoSpec.getMetadata().getLabels().getClusterPods());
} else {
assertNull(dtoSpec.getMetadata());
}
if (resourcePod.getScheduling() != null) {
assertNotNull(dtoSpecPods.getScheduling());
assertEquals(resourcePod.getScheduling().getNodeSelector(), dtoSpecPods.getScheduling().getNodeSelector());
assertEquals(resourcePod.getScheduling().getNodeAffinity(), dtoSpecPods.getScheduling().getNodeAffinity());
} else {
assertNull(dtoSpecPods.getScheduling());
}
} else {
assertNull(dtoSpecPods);
}
if (resourceSpec.getDistributedLogs() != null) {
assertNotNull(dtoSpec.getDistributedLogs());
assertEquals(resourceSpec.getDistributedLogs().getDistributedLogs(), dtoSpec.getDistributedLogs().getDistributedLogs());
} else {
assertNull(dtoSpec.getDistributedLogs());
}
final StackGresClusterInitData resourceInitData = resourceSpec.getInitData();
if (resourceInitData != null) {
final ClusterInitData dtoInitData = dtoSpec.getInitData();
assertNotNull(dtoInitData);
if (resourceInitData.getRestore() != null) {
assertNotNull(dtoInitData.getRestore());
assertEquals(resourceInitData.getRestore().getFromBackup().getUid(), dtoInitData.getRestore().getFromBackup().getUid());
assertEquals(resourceInitData.getRestore().getDownloadDiskConcurrency(), dtoInitData.getRestore().getDownloadDiskConcurrency());
} else {
assertNull(dtoInitData.getRestore());
}
if (resourceInitData.getScripts() != null) {
assertNotNull(dtoInitData.getScripts());
Seq.zip(resourceInitData.getScripts(), dtoInitData.getScripts()).forEach(tuple -> {
assertEquals(tuple.v1.getDatabase(), tuple.v2.getDatabase());
assertEquals(tuple.v1.getName(), tuple.v2.getName());
assertEquals(tuple.v1.getScript(), tuple.v2.getScript());
final StackGresClusterScriptFrom resourceScriptFrom = tuple.v1.getScriptFrom();
final ClusterScriptFrom dtoScriptFrom = tuple.v2.getScriptFrom();
if (resourceScriptFrom != null) {
assertNotNull(dtoScriptFrom);
if (resourceScriptFrom.getSecretKeyRef() != null) {
assertNotNull(dtoScriptFrom.getSecretKeyRef());
assertEquals(resourceScriptFrom.getSecretKeyRef().getKey(), dtoScriptFrom.getSecretKeyRef().getKey());
assertEquals(resourceScriptFrom.getSecretKeyRef().getName(), dtoScriptFrom.getSecretKeyRef().getName());
} else {
assertNull(dtoScriptFrom.getSecretKeyRef());
}
if (resourceScriptFrom.getConfigMapKeyRef() != null) {
assertNotNull(dtoScriptFrom.getConfigMapKeyRef());
assertEquals(resourceScriptFrom.getConfigMapKeyRef().getKey(), dtoScriptFrom.getConfigMapKeyRef().getKey());
assertEquals(resourceScriptFrom.getConfigMapKeyRef().getName(), dtoScriptFrom.getConfigMapKeyRef().getName());
assertEquals(configMap.getData().get(resourceScriptFrom.getConfigMapKeyRef().getKey()), dtoScriptFrom.getConfigMapScript());
} else {
assertNull(dtoScriptFrom.getConfigMapKeyRef());
}
} else {
assertNull(dtoScriptFrom);
}
});
}
}
} else {
assertNull(dtoSpec);
}
if (dto.getPods() != null) {
assertEquals(1, dto.getPodsReady());
assertEquals(2, dto.getPods().size());
assertEquals(4, dto.getPods().get(0).getContainers());
assertEquals(4, dto.getPods().get(0).getContainersReady());
assertEquals("10.244.3.23", dto.getPods().get(0).getIp());
assertEquals("stackgres-0", dto.getPods().get(0).getName());
assertEquals("stackgres", dto.getPods().get(0).getNamespace());
assertEquals("primary", dto.getPods().get(0).getRole());
assertEquals("Active", dto.getPods().get(0).getStatus());
assertEquals(4, dto.getPods().get(0).getComponentVersions().size());
assertEquals("12.2", dto.getPods().get(0).getComponentVersions().get("postgresql"));
assertEquals("1.6.4", dto.getPods().get(0).getComponentVersions().get("patroni"));
assertEquals("1.13.1", dto.getPods().get(0).getComponentVersions().get("envoy"));
assertEquals("0.8", dto.getPods().get(0).getComponentVersions().get("prometheus-postgres-exporter"));
assertEquals(4, dto.getPods().get(1).getContainers());
assertEquals(0, dto.getPods().get(1).getContainersReady());
assertNull(dto.getPods().get(1).getIp());
assertEquals("stackgres-1", dto.getPods().get(1).getName());
assertEquals("stackgres", dto.getPods().get(1).getNamespace());
assertNull(dto.getPods().get(1).getRole());
assertEquals("Pending", dto.getPods().get(1).getStatus());
assertEquals(4, dto.getPods().get(1).getComponentVersions().size());
assertEquals("12.2", dto.getPods().get(1).getComponentVersions().get("postgresql"));
assertEquals("1.6.4", dto.getPods().get(1).getComponentVersions().get("patroni"));
assertEquals("1.13.1", dto.getPods().get(1).getComponentVersions().get("envoy"));
assertEquals("0.8", dto.getPods().get(1).getComponentVersions().get("prometheus-postgres-exporter"));
}
if (dto.getInfo() != null) {
String appendDns = "." + resource.getMetadata().getNamespace() + ".svc.cluster.local";
String expectedPrimaryDns = PatroniUtil.readWriteName(resource.getMetadata().getName()) + appendDns;
String expectedReplicasDns = "f4611c56942064ed5a468d8ce0a894ec.us-east-1.elb.amazonaws.com";
assertEquals(expectedPrimaryDns, dto.getInfo().getPrimaryDns());
assertEquals(expectedReplicasDns, dto.getInfo().getReplicasDns());
assertEquals("postgres", dto.getInfo().getSuperuserUsername());
assertEquals("superuser-password", dto.getInfo().getSuperuserPasswordKey());
assertEquals(resource.getMetadata().getName(), dto.getInfo().getSuperuserSecretName());
}
}
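The scripts comparison in checkDto above relies on jOOλ's Seq.zip to pair corresponding entries of the expected and actual lists before asserting on each pair. A minimal, self-contained sketch of the same idiom (the list contents and class name here are illustrative, not taken from the test):

import java.util.List;

import org.jooq.lambda.Seq;

// Pair corresponding elements of two lists and compare them one by one,
// mirroring the Seq.zip(...).forEach(tuple -> ...) pattern used in checkDto.
public class ZipCompareExample {
  public static void main(String[] args) {
    List<String> expected = List.of("init.sql", "seed.sql");
    List<String> actual = List.of("init.sql", "seed.sql");
    Seq.zip(expected, actual).forEach(tuple -> {
      // tuple.v1 comes from the first list, tuple.v2 from the second
      if (!tuple.v1.equals(tuple.v2)) {
        throw new AssertionError("Mismatch: " + tuple.v1 + " vs " + tuple.v2);
      }
    });
  }
}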
Use of io.stackgres.apiweb.dto.cluster.ClusterSpec in project stackgres by ongres.
The class ClusterTransformer, method getResourceSpec. It maps a StackGresClusterSpec taken from the custom resource into the ClusterSpec DTO exposed by the REST API.
private ClusterSpec getResourceSpec(StackGresClusterSpec source) {
if (source == null) {
return null;
}
ClusterSpec transformation = new ClusterSpec();
transformation.setPostgres(new ClusterPostgres());
transformation.getPostgres().setVersion(source.getPostgres().getVersion());
transformation.getPostgres().setExtensions(Optional.ofNullable(source.getPostgres().getExtensions())
    .stream()
    .flatMap(List::stream)
    .map(this::getResourceExtension)
    .collect(ImmutableList.toImmutableList()));
final StackGresClusterSsl sourceClusterSsl = source.getPostgres().getSsl();
if (sourceClusterSsl != null) {
transformation.getPostgres().setSsl(new ClusterSsl());
transformation.getPostgres().getSsl().setEnabled(sourceClusterSsl.getEnabled());
transformation.getPostgres().getSsl().setCertificateSecretKeySelector(sourceClusterSsl.getCertificateSecretKeySelector());
transformation.getPostgres().getSsl().setPrivateKeySecretKeySelector(sourceClusterSsl.getPrivateKeySecretKeySelector());
}
transformation.setConfigurations(new ClusterConfiguration());
transformation.getConfigurations().setSgBackupConfig(source.getConfiguration().getBackupConfig());
transformation.getConfigurations().setSgPoolingConfig(source.getConfiguration().getConnectionPoolingConfig());
transformation.setInstances(source.getInstances());
transformation.setNonProduction(getResourceNonProduction(source.getNonProduction()));
transformation.getConfigurations().setSgPostgresConfig(source.getConfiguration().getPostgresConfig());
transformation.setPrometheusAutobind(source.getPrometheusAutobind());
transformation.setSgInstanceProfile(source.getResourceProfile());
final StackGresClusterInitData sourceInitData = source.getInitData();
if (sourceInitData != null) {
ClusterInitData targetInitData = new ClusterInitData();
transformation.setInitData(targetInitData);
final StackGresClusterRestore sourceRestore = sourceInitData.getRestore();
if (sourceRestore != null) {
targetInitData.setRestore(getResourceRestore(sourceRestore));
}
if (sourceInitData.getScripts() != null) {
targetInitData.setScripts(sourceInitData.getScripts().stream().map(sourceEntry -> {
ClusterScriptEntry targetEntry = new ClusterScriptEntry();
targetEntry.setScript(sourceEntry.getScript());
targetEntry.setDatabase(sourceEntry.getDatabase());
targetEntry.setName(sourceEntry.getName());
if (sourceEntry.getScriptFrom() != null) {
targetEntry.setScriptFrom(new ClusterScriptFrom());
targetEntry.getScriptFrom().setSecretKeyRef(sourceEntry.getScriptFrom().getSecretKeyRef());
targetEntry.getScriptFrom().setConfigMapKeyRef(sourceEntry.getScriptFrom().getConfigMapKeyRef());
}
return targetEntry;
}).collect(ImmutableList.toImmutableList()));
}
}
final ClusterPod targetPod = new ClusterPod();
final StackGresClusterPod sourcePod = source.getPod();
transformation.setPods(targetPod);
targetPod.setPersistentVolume(new ClusterPodPersistentVolume());
targetPod.getPersistentVolume().setStorageClass(sourcePod.getPersistentVolume().getStorageClass());
targetPod.getPersistentVolume().setSize(sourcePod.getPersistentVolume().getSize());
targetPod.setDisableConnectionPooling(sourcePod.getDisableConnectionPooling());
targetPod.setDisableMetricsExporter(sourcePod.getDisableMetricsExporter());
targetPod.setDisablePostgresUtil(sourcePod.getDisablePostgresUtil());
final StackGresClusterSpecMetadata specMetadata = source.getMetadata();
if (specMetadata != null) {
transformation.setMetadata(new ClusterSpecMetadata());
final StackGresClusterSpecAnnotations sourceAnnotations = specMetadata.getAnnotations();
if (sourceAnnotations != null) {
ClusterSpecAnnotations targetAnnotations = new ClusterSpecAnnotations();
targetAnnotations.setAllResources(sourceAnnotations.getAllResources());
targetAnnotations.setClusterPods(sourceAnnotations.getClusterPods());
targetAnnotations.setServices(sourceAnnotations.getServices());
targetAnnotations.setPrimaryService(sourceAnnotations.getPrimaryService());
targetAnnotations.setReplicasService(sourceAnnotations.getReplicasService());
transformation.getMetadata().setAnnotations(targetAnnotations);
}
final StackGresClusterSpecLabels sourceLabels = specMetadata.getLabels();
if (sourceLabels != null) {
ClusterSpecLabels targetLabels = new ClusterSpecLabels();
targetLabels.setClusterPods(sourceLabels.getClusterPods());
transformation.getMetadata().setLabels(targetLabels);
}
}
final StackGresClusterPostgresServices sourcePostgresServices = source.getPostgresServices();
if (sourcePostgresServices != null) {
transformation.setPostgresServices(new ClusterPostgresServices());
final ClusterPostgresServices targetPostgresService = transformation.getPostgresServices();
final StackGresPostgresService sourcePrimaryService = sourcePostgresServices.getPrimary();
if (sourcePrimaryService != null) {
targetPostgresService.setPrimary(new PostgresService());
targetPostgresService.getPrimary().setType(sourcePrimaryService.getType());
targetPostgresService.getPrimary().setEnabled(sourcePrimaryService.getEnabled());
}
final StackGresPostgresService sourceReplicaService = sourcePostgresServices.getReplicas();
if (sourceReplicaService != null) {
targetPostgresService.setReplicas(new PostgresService());
targetPostgresService.getReplicas().setEnabled(sourceReplicaService.getEnabled());
targetPostgresService.getReplicas().setType(sourceReplicaService.getType());
}
}
targetPod.setScheduling(Optional.ofNullable(sourcePod.getScheduling()).map(sourcePodScheduling -> {
return new ClusterPodSchedulingConverter().from(sourcePodScheduling);
}).orElse(null));
transformation.setDistributedLogs(getResourceDistributedLogs(source.getDistributedLogs()));
if (source.getToInstallPostgresExtensions() != null) {
transformation.setToInstallPostgresExtensions(source.getToInstallPostgresExtensions().stream().map(this::getClusterInstalledExtension).collect(ImmutableList.toImmutableList()));
}
return transformation;
}
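A note on the null-safe mapping used for the extensions above: Optional.ofNullable(list).stream().flatMap(List::stream) turns a possibly-null list into a possibly empty stream without an explicit null check (Java 9+). A minimal sketch of the idiom in isolation, using plain JDK collectors instead of Guava's ImmutableList (names are illustrative):

import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

// Convert a possibly-null List<String> into an upper-cased list,
// producing an empty result when the input is null.
public class NullSafeMapExample {
  static List<String> upperCaseAll(List<String> maybeNull) {
    return Optional.ofNullable(maybeNull)
        .stream()
        .flatMap(List::stream)
        .map(String::toUpperCase)
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    System.out.println(upperCaseAll(null));              // []
    System.out.println(upperCaseAll(List.of("a", "b"))); // [A, B]
  }
}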
Use of io.stackgres.apiweb.dto.cluster.ClusterSpec in project stackgres by ongres.
The class NamespacedClusterLogsResource, method logs. It exposes the distributed-logs query endpoint for a cluster and validates its query parameters.
/**
* Query distributed logs and return a list of {@code ClusterLogEntryDto}.
*/
@Operation(responses = { @ApiResponse(responseCode = "200", description = "OK", content = { @Content(mediaType = "application/json", array = @ArraySchema(schema = @Schema(implementation = ClusterLogEntryDto.class))) }) })
@CommonApiResponses
@GET
@Path("{name}/logs")
public List<ClusterLogEntryDto> logs(
    @PathParam("namespace") String namespace,
    @PathParam("name") String name,
    @QueryParam("records") Integer records,
    @QueryParam("from") String from,
    @QueryParam("to") String to,
    @QueryParam("sort") String sort,
    @QueryParam("text") String text,
    @QueryParam("logType") List<String> logType,
    @QueryParam("podName") List<String> podName,
    @QueryParam("role") List<String> role,
    @QueryParam("errorLevel") List<String> errorLevel,
    @QueryParam("userName") List<String> userName,
    @QueryParam("databaseName") List<String> databaseName,
    @QueryParam("fromInclusive") Boolean fromInclusive) {
final ClusterDto cluster = clusterFinder.findByNameAndNamespace(name, namespace).orElseThrow(NotFoundException::new);
final int calculatedRecords = records != null ? records : 50;
if (calculatedRecords <= 0) {
throw new BadRequestException("records should be a positive number");
}
final Optional<Tuple2<Instant, Integer>> fromTuple;
final Optional<Tuple2<Instant, Integer>> toTuple;
if (!Optional.ofNullable(cluster.getSpec()).map(ClusterSpec::getDistributedLogs).map(ClusterDistributedLogs::getDistributedLogs).isPresent()) {
throw new BadRequestException("Distributed logs are not configured for specified cluster");
}
final var filters = ImmutableMap.<String, ImmutableList<String>>builder();
addFilter("logType", logType, filters);
addFilter("podName", podName, filters);
addFilter("role", role, filters);
addFilter("errorLevel", errorLevel, filters);
addFilter("userName", userName, filters);
addFilter("databaseName", databaseName, filters);
try {
fromTuple = Optional.ofNullable(from)
    .map(s -> s.split(","))
    .map(ss -> Tuple.tuple(ss[0], ss.length > 1 ? ss[1] : String.valueOf(Integer.valueOf(0))))
    .map(t -> t.map1(Instant::parse))
    .map(t -> t.map2(Integer::valueOf));
} catch (Exception ex) {
throw new BadRequestException("from should be a timestamp" + " or a timestamp and an index separated by character ','", ex);
}
try {
toTuple = Optional.ofNullable(to)
    .map(s -> s.split(","))
    .map(ss -> Tuple.tuple(ss[0], ss.length > 1 ? ss[1] : String.valueOf(Integer.MAX_VALUE)))
    .map(t -> t.map1(Instant::parse))
    .map(t -> t.map2(Integer::valueOf));
} catch (Exception ex) {
throw new BadRequestException("to should be a timestamp" + " or a timestamp and an index separated by character ','", ex);
}
if (sort != null && !sort.equals("asc") && !sort.equals("desc")) {
throw new BadRequestException("sort only accept asc or desc values");
}
DistributedLogsQueryParameters logs = ImmutableDistributedLogsQueryParameters.builder()
    .cluster(cluster)
    .records(calculatedRecords)
    .fromTimeAndIndex(fromTuple)
    .toTimeAndIndex(toTuple)
    .filters(filters.build())
    .isSortAsc(Objects.equals("asc", sort))
    .fullTextSearchQuery(Optional.ofNullable(text).map(FullTextSearchQuery::new))
    .isFromInclusive(fromInclusive != null && fromInclusive)
    .build();
return distributedLogsFetcher.logs(logs);
}
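As a usage illustration, the endpoint above can be called with the query parameters it validates (records, sort, from, to, and the filter lists). A minimal sketch using java.net.http; the host and base path (/stackgres/namespaces/{namespace}/sgclusters) are assumptions, since the class-level @Path and deployment URL are not shown here:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Fetch the 20 most recent log entries of cluster "my-cluster" in namespace "demo",
// sorted descending. Adjust the base URL and authentication to the actual deployment.
public class ClusterLogsClientExample {
  public static void main(String[] args) throws Exception {
    String url = "https://stackgres-restapi/stackgres/namespaces/demo/sgclusters"
        + "/my-cluster/logs?records=20&sort=desc";
    HttpRequest request = HttpRequest.newBuilder(URI.create(url))
        .header("Authorization", "Bearer <token>") // authentication is deployment-specific
        .GET()
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body()); // JSON array of ClusterLogEntryDto
  }
}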
Use of io.stackgres.apiweb.dto.cluster.ClusterSpec in project stackgres by ongres.
The class DistributedLogsFetcherImpl, method getConnection. It opens a JDBC connection to the Postgres instance of the distributed-logs server referenced by the cluster.
private Connection getConnection(ClusterDto cluster) throws SQLException {
final String distributedLogs = Optional.ofNullable(cluster.getSpec())
    .map(ClusterSpec::getDistributedLogs)
    .map(ClusterDistributedLogs::getDistributedLogs)
    .orElseThrow(() -> new IllegalArgumentException(
        "Distributed logs are not configured for this cluster"));
String namespace = StackGresUtil.getNamespaceFromRelativeId(distributedLogs, cluster.getMetadata().getNamespace());
String name = StackGresUtil.getNameFromRelativeId(distributedLogs);
String serviceName = PatroniUtil.name(name);
Secret secret = secretFinder.findByNameAndNamespace(name, namespace)
    .orElseThrow(() -> new NotFoundException(
        "Secret with username and password for user postgres can not be found."));
return postgresConnectionManager.getConnection(
    serviceName + "." + namespace,
    "postgres",
    ResourceUtil.decodeSecret(secret.getData().get("superuser-password")),
    FluentdUtil.databaseName(cluster.getMetadata().getNamespace(), cluster.getMetadata().getName()));
}
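For context on the secret handling above: Kubernetes stores Secret data Base64-encoded, so ResourceUtil.decodeSecret presumably performs a Base64 decode of the superuser-password entry. A minimal stand-alone sketch of that step (an assumption about the decoding, not the actual ResourceUtil implementation):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Map;

// Decode a Base64-encoded Secret data entry, as the Kubernetes API returns it.
public class SecretDecodeExample {
  static String decodeSecretEntry(Map<String, String> secretData, String key) {
    String encoded = secretData.get(key);
    return new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    Map<String, String> data = Map.of("superuser-password", "c3VwZXJzZWNyZXQ="); // "supersecret"
    System.out.println(decodeSecretEntry(data, "superuser-password"));
  }
}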
Use of io.stackgres.apiweb.dto.cluster.ClusterSpec in project stackgres by ongres.
The class ClusterResourceMockedTest, method checkCustomResource. It performs the inverse check of checkDto, verifying that a ClusterDto is correctly mapped back to the StackGresCluster custom resource.
@Override
protected void checkCustomResource(StackGresCluster resource, ClusterDto resourceDto, Operation operation) {
final Metadata dtoMetadata = resourceDto.getMetadata();
final ObjectMeta resourceMetadata = resource.getMetadata();
if (dtoMetadata != null) {
assertNotNull(resourceMetadata);
assertEquals(dtoMetadata.getName(), resourceMetadata.getName());
assertEquals(dtoMetadata.getNamespace(), resourceMetadata.getNamespace());
assertEquals(dtoMetadata.getUid(), resourceMetadata.getUid());
} else {
assertNull(resourceMetadata);
}
final ClusterSpec dtoSpec = resourceDto.getSpec();
final StackGresClusterSpec resourceSpec = resource.getSpec();
if (dtoSpec != null) {
assertNotNull(resourceSpec);
assertEquals(dtoSpec.getPrometheusAutobind(), resourceSpec.getPrometheusAutobind());
assertEquals(dtoSpec.getInstances(), resourceSpec.getInstances());
assertEquals(dtoSpec.getPostgres().getVersion(), resourceSpec.getPostgres().getVersion());
assertEquals(dtoSpec.getSgInstanceProfile(), resourceSpec.getResourceProfile());
final ClusterConfiguration dtoSpecConfigurations = dtoSpec.getConfigurations();
final StackGresClusterConfiguration resourceSpecConfiguration = resourceSpec.getConfiguration();
if (dtoSpecConfigurations != null) {
assertNotNull(resourceSpecConfiguration);
assertEquals(dtoSpecConfigurations.getSgBackupConfig(), resourceSpecConfiguration.getBackupConfig());
assertEquals(dtoSpecConfigurations.getSgPoolingConfig(), resourceSpecConfiguration.getConnectionPoolingConfig());
assertEquals(dtoSpecConfigurations.getSgPostgresConfig(), resourceSpecConfiguration.getPostgresConfig());
} else {
assertNull(resourceSpecConfiguration);
}
final ClusterPod dtoSpecPods = dtoSpec.getPods();
if (dtoSpecPods != null) {
final StackGresClusterPod resourceSpecPod = resourceSpec.getPod();
assertNotNull(resourceSpecPod);
assertEquals(dtoSpecPods.getDisableConnectionPooling(), resourceSpecPod.getDisableConnectionPooling());
assertEquals(dtoSpecPods.getDisableMetricsExporter(), resourceSpecPod.getDisableMetricsExporter());
assertEquals(dtoSpecPods.getDisablePostgresUtil(), resourceSpecPod.getDisablePostgresUtil());
final ClusterPodPersistentVolume dtoPV = dtoSpecPods.getPersistentVolume();
final StackGresPodPersistentVolume resourcePV = resourceSpecPod.getPersistentVolume();
if (dtoPV != null) {
assertNotNull(resourcePV);
assertEquals(dtoPV.getSize(), resourcePV.getSize());
assertEquals(dtoPV.getStorageClass(), resourcePV.getStorageClass());
} else {
assertNull(resourcePV);
}
final StackGresClusterSpecLabels resourceMetadataLabels = Optional.ofNullable(resourceSpec.getMetadata()).map(StackGresClusterSpecMetadata::getLabels).orElse(null);
final ClusterSpecLabels dtoMetadataLabels = Optional.ofNullable(dtoSpec.getMetadata()).map(ClusterSpecMetadata::getLabels).orElse(null);
if (dtoMetadataLabels != null) {
assertNotNull(resourceMetadataLabels);
assertEquals(dtoMetadataLabels.getClusterPods(), resourceMetadataLabels.getClusterPods());
} else {
assertNull(resourceMetadataLabels);
}
final ClusterPodScheduling podScheduling = dtoSpecPods.getScheduling();
final StackGresClusterPodScheduling resourceScheduling = resourceSpecPod.getScheduling();
if (podScheduling != null) {
assertNotNull(resourceScheduling);
assertEquals(podScheduling.getNodeSelector(), resourceScheduling.getNodeSelector());
assertEquals(podScheduling.getNodeAffinity(), resourceScheduling.getNodeAffinity());
} else {
assertNull(resourceScheduling);
}
}
final ClusterInitData dtoInitData = dtoSpec.getInitData();
final StackGresClusterInitData resourceInitData = resourceSpec.getInitData();
if (dtoInitData != null) {
assertNotNull(resourceInitData);
final ClusterRestore dtoRestore = dtoInitData.getRestore();
final StackGresClusterRestore resourceRestore = resourceInitData.getRestore();
if (dtoRestore != null) {
assertNotNull(resourceRestore);
assertEquals(dtoRestore.getFromBackup().getUid(), resourceRestore.getFromBackup().getUid());
} else {
assertNull(resourceRestore);
}
if (dtoInitData.getScripts() != null) {
assertNotNull(resourceInitData.getScripts());
assertEquals(dtoInitData.getScripts().size(), resourceInitData.getScripts().size());
Seq.zip(dtoInitData.getScripts(), resourceInitData.getScripts()).forEach(entryTuple -> {
ClusterScriptEntry dtoEntry = entryTuple.v1;
StackGresClusterScriptEntry resourceEntry = entryTuple.v2;
assertEquals(dtoEntry.getDatabase(), resourceEntry.getDatabase());
assertEquals(dtoEntry.getName(), resourceEntry.getName());
assertEquals(dtoEntry.getScript(), resourceEntry.getScript());
final ClusterScriptFrom dtoScriptFrom = dtoEntry.getScriptFrom();
final StackGresClusterScriptFrom resourceScriptFrom = resourceEntry.getScriptFrom();
if (dtoScriptFrom != null) {
assertNotNull(resourceScriptFrom);
final SecretKeySelector dtoSecretKeyRef = dtoScriptFrom.getSecretKeyRef();
final SecretKeySelector resourceSecretKeyRef = resourceScriptFrom.getSecretKeyRef();
if (dtoSecretKeyRef != null) {
assertNotNull(resourceSecretKeyRef);
assertEquals(dtoSecretKeyRef.getName(), resourceSecretKeyRef.getName());
assertEquals(dtoSecretKeyRef.getKey(), resourceSecretKeyRef.getKey());
} else {
assertNull(resourceSecretKeyRef);
}
final ConfigMapKeySelector resourceConfigMapKeyRef = resourceScriptFrom.getConfigMapKeyRef();
final ConfigMapKeySelector dtoConfigMapKeyRef = dtoScriptFrom.getConfigMapKeyRef();
if (dtoConfigMapKeyRef != null) {
assertNotNull(resourceConfigMapKeyRef);
assertEquals(dtoConfigMapKeyRef.getName(), resourceConfigMapKeyRef.getName());
assertEquals(dtoConfigMapKeyRef.getKey(), resourceConfigMapKeyRef.getKey());
} else {
assertNull(resourceConfigMapKeyRef);
}
} else {
assertNull(resourceScriptFrom);
}
});
}
} else {
assertNull(resourceInitData);
}
if (dtoSpec.getDistributedLogs() != null) {
assertNotNull(resourceSpec.getDistributedLogs());
assertEquals(dtoSpec.getDistributedLogs().getDistributedLogs(), resourceSpec.getDistributedLogs().getDistributedLogs());
} else {
assertNull(resourceSpec.getDistributedLogs());
}
} else {
assertNull(resourceSpec);
}
}