Usage of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the project stackgres by OnGres.
From the class DistributedLogsConciliatorTest, method shouldIgnoreChangesMarkedPauseUntilRestartAnnotationIfClusterIsPendingToRestart:
@Test
@DisplayName("Conciliation Should Ignore Changes On Resources Marked With Reconciliation " + "Pause Until Restart Annotation If The Cluster Is Pending To Restart")
void shouldIgnoreChangesMarkedPauseUntilRestartAnnotationIfClusterIsPendingToRestart() {
  // Build the required resources and an identical deployed copy.
  final List<HasMetadata> required = KubernetessMockResourceGenerationUtil.buildResources("test", "test");
  final List<HasMetadata> deployed = deepCopy(required);
  // Mark one deployed resource with the "pause reconciliation until restart" annotation.
  final HasMetadata pausedResource = deployed.stream().findAny().orElseThrow();
  pausedResource.getMetadata().setAnnotations(
      Map.of(StackGresContext.RECONCILIATION_PAUSE_UNTIL_RESTART_KEY, Boolean.TRUE.toString()));
  final Conciliator<StackGresDistributedLogs> conciliator = buildConciliator(required, deployed);
  // Simulate a cluster that is still pending a restart.
  reset(statusManager);
  when(statusManager.isPendingRestart(distributedLogs)).thenReturn(true);
  final ReconciliationResult result = conciliator.evalReconciliationState(getConciliationResource());
  // The paused resource's change must be ignored: no patches, state up to date.
  assertEquals(0, result.getPatches().size());
  assertTrue(result.isUpToDate());
}
Usage of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the project stackgres by OnGres.
From the class DistributedLogsStatusManager, method isPendingRestart:
/**
 * Checks whether the distributed logs cluster has a pending restart condition.
 *
 * <p>Gathers the recorded pod statuses, the cluster stateful set and its pods,
 * asks {@code ClusterPendingRestartUtil} for the restart reasons, logs each
 * reason at debug level and reports whether any restart is required.
 */
public boolean isPendingRestart(StackGresDistributedLogs distributedLogs) {
  List<StackGresClusterPodStatus> podStatuses = Optional.ofNullable(distributedLogs.getStatus())
      .map(StackGresDistributedLogsStatus::getPodStatuses)
      .orElse(ImmutableList.of());
  Optional<StatefulSet> statefulSet = getClusterStatefulSet(distributedLogs);
  List<Pod> pods = statefulSet
      .map(sts -> getStsPods(sts, distributedLogs))
      .orElse(ImmutableList.of());
  RestartReasons reasons = ClusterPendingRestartUtil.getRestartReasons(podStatuses, statefulSet, pods);
  for (RestartReason reason : reasons.getReasons()) {
    logRestartReason(reason, distributedLogs);
  }
  return reasons.requiresRestart();
}

/**
 * Logs the human-readable explanation for a single restart reason at debug level.
 */
private void logRestartReason(RestartReason reason, StackGresDistributedLogs distributedLogs) {
  switch (reason) {
    case OPERATOR_VERSION:
      LOGGER.debug("Distributed Logs {} requires restart due to operator version change", getDistributedLogsId(distributedLogs));
      break;
    case PATRONI:
      LOGGER.debug("Distributed Logs {} requires restart due to patroni's indication", getDistributedLogsId(distributedLogs));
      break;
    case POD_STATUS:
      LOGGER.debug("Distributed Logs {} requires restart due to pod status indication", getDistributedLogsId(distributedLogs));
      break;
    case STATEFULSET:
      LOGGER.debug("Distributed Logs {} requires restart due to pod template changes", getDistributedLogsId(distributedLogs));
      break;
    default:
      break;
  }
}
Usage of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the project stackgres by OnGres.
From the class DistributedLogsClusterReconciliator, method updateStatus:
/**
 * Updates the per-database status entry of the distributed logs resource,
 * creating the entry when it does not exist yet.
 *
 * @param distributedLogs the resource whose status is updated (must have a non-null status)
 * @param database the database name to look up or record
 * @param retention the retention value to record for the database
 * @return {@code true} when the entry was created or its retention changed,
 *     {@code false} when nothing needed updating
 */
private boolean updateStatus(StackGresDistributedLogs distributedLogs, String database, String retention) {
  // Null-safe comparison: the guard below shows entries may carry a null name,
  // and getName().equals(database) would NPE on such an entry.
  Optional<StackGresDistributedLogsStatusDatabase> foundDistributedLogsDatabase =
      distributedLogs.getStatus().getDatabases().stream()
          .filter(databaseStatus -> Objects.equals(databaseStatus.getName(), database))
          .findAny();
  final StackGresDistributedLogsStatusDatabase distributedLogsDatabase =
      foundDistributedLogsDatabase.orElseGet(StackGresDistributedLogsStatusDatabase::new);
  if (!foundDistributedLogsDatabase.isPresent()) {
    distributedLogs.getStatus().getDatabases().add(distributedLogsDatabase);
  }
  // A freshly created entry has a null name, so it always takes this branch
  // and gets both its name and retention initialized.
  if (Objects.isNull(distributedLogsDatabase.getName())
      || !Objects.equals(retention, distributedLogsDatabase.getRetention())) {
    distributedLogsDatabase.setName(database);
    distributedLogsDatabase.setRetention(retention);
    return true;
  }
  return false;
}
Usage of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the project stackgres by OnGres.
From the class DistributedLogsPodTemplateSpecFactory, method getPodTemplateSpec:
@Override
public PodTemplateResult getPodTemplateSpec(DistributedLogsContainerContext context) {
  StackGresDistributedLogs cluster = context.getDistributedLogsContext().getSource();
  final Map<String, String> podLabels = labelFactory.statefulSetPodLabels(cluster);

  // Resolve containers and init containers from their discovered factories.
  List<ContainerFactory<DistributedLogsContainerContext>> containerFactories =
      containerFactoryDiscoverer.discoverContainers(context);
  List<Container> containers = containerFactories.stream()
      .map(factory -> factory.getContainer(context))
      .collect(Collectors.toUnmodifiableList());
  final List<ContainerFactory<DistributedLogsContainerContext>> initContainerFactories =
      initContainerFactoryDiscoverer.discoverContainers(context);
  List<Container> initContainers = initContainerFactories.stream()
      .map(factory -> factory.getContainer(context))
      .collect(Collectors.toUnmodifiableList());

  // Every volume mounted by any container must be available, except the data
  // volume which is provided by the stateful set's volume claim template.
  final List<String> claimedVolumes = Stream.concat(containers.stream(), initContainers.stream())
      .flatMap(container -> container.getVolumeMounts().stream())
      .map(VolumeMount::getName)
      .distinct()
      .collect(Collectors.toUnmodifiableList());
  claimedVolumes.forEach(claimedVolume -> {
    if (!context.availableVolumes().containsKey(claimedVolume)
        && !context.getDataVolumeName().equals(claimedVolume)) {
      throw new IllegalStateException("Volume " + claimedVolume + " is required but not available");
    }
  });
  List<Volume> volumes = claimedVolumes.stream()
      .map(volumeName -> context.availableVolumes().get(volumeName))
      .filter(Objects::nonNull)
      .collect(Collectors.toUnmodifiableList());

  // Pod metadata: pod labels plus the operator version annotation, keeping the
  // cluster's existing version annotation when present.
  var podMetadata = new ObjectMetaBuilder()
      .addToLabels(podLabels)
      .addToAnnotations(StackGresContext.VERSION_KEY,
          cluster.getMetadata().getAnnotations()
              .getOrDefault(StackGresContext.VERSION_KEY, StackGresProperty.OPERATOR_VERSION.getString()))
      .build();

  // Anti-affinity keeps this pod apart from StackGres cluster pods, unless
  // explicitly disabled via the non-production options.
  final boolean antiAffinityEnabled = Optional.ofNullable(cluster.getSpec().getNonProduction())
      .map(StackGresDistributedLogsNonProduction::getDisableClusterPodAntiAffinity)
      .map(disabled -> !disabled)
      .orElse(true);
  var affinity = Optional.of(new AffinityBuilder()
          .withPodAntiAffinity(new PodAntiAffinityBuilder()
              .addAllToRequiredDuringSchedulingIgnoredDuringExecution(ImmutableList.of(
                  new PodAffinityTermBuilder()
                      .withLabelSelector(new LabelSelectorBuilder()
                          .withMatchExpressions(
                              new LabelSelectorRequirementBuilder()
                                  .withKey(StackGresContext.APP_KEY)
                                  .withOperator("In")
                                  .withValues(labelFactory.labelMapper().appName())
                                  .build(),
                              new LabelSelectorRequirementBuilder()
                                  .withKey("cluster")
                                  .withOperator("In")
                                  .withValues("true")
                                  .build())
                          .build())
                      .withTopologyKey("kubernetes.io/hostname")
                      .build()))
              .build())
          .build())
      .filter(ignored -> antiAffinityEnabled)
      .orElse(null);

  // Optional scheduling hints taken from the spec when present.
  var scheduling = Optional.ofNullable(cluster.getSpec())
      .map(StackGresDistributedLogsSpec::getScheduling);
  var nodeSelector = scheduling
      .map(StackGresDistributedLogsPodScheduling::getNodeSelector)
      .orElse(null);
  var tolerations = scheduling
      .map(StackGresDistributedLogsPodScheduling::getTolerations)
      .map(specTolerations -> Seq.seq(specTolerations)
          .map(TolerationBuilder::new)
          .map(TolerationBuilder::build)
          .toList())
      .orElse(null);

  var podTemplateSpec = new PodTemplateSpecBuilder()
      .withMetadata(podMetadata)
      .withNewSpec()
      .withAffinity(affinity)
      .withNodeSelector(nodeSelector)
      .withTolerations(tolerations)
      .withShareProcessNamespace(Boolean.TRUE)
      .withServiceAccountName(PatroniRole.roleName(context.getDistributedLogsContext()))
      .withSecurityContext(podSecurityContext.createResource(context.getDistributedLogsContext()))
      .withVolumes(volumes)
      .withContainers(containers)
      .withInitContainers(initContainers)
      .withTerminationGracePeriodSeconds(60L)
      .endSpec()
      .build();
  return ImmutablePodTemplateResult.builder()
      .spec(podTemplateSpec)
      .claimedVolumes(claimedVolumes)
      .build();
}
Usage of io.stackgres.common.crd.sgdistributedlogs.StackGresDistributedLogs in the project stackgres by OnGres.
From the class DistributedLogsStatefulSet, method generateResource:
@Override
public Stream<HasMetadata> generateResource(StackGresDistributedLogsContext context) {
  final StackGresDistributedLogs cluster = context.getSource();
  final ObjectMeta metadata = cluster.getMetadata();
  final String name = metadata.getName();
  final String namespace = metadata.getNamespace();

  // Storage configuration backing the data volume claim.
  final StackGresDistributedLogsPersistentVolume persistentVolume =
      cluster.getSpec().getPersistentVolume();
  StorageConfig dataStorageConfig = ImmutableStorageConfig.builder()
      .size(persistentVolume.getSize())
      // Optional.ofNullable(x).orElse(null) was an identity no-op; pass the
      // (possibly null) storage class directly.
      .storageClass(persistentVolume.getStorageClass())
      .build();
  final PersistentVolumeClaimSpecBuilder volumeClaimSpec = new PersistentVolumeClaimSpecBuilder()
      .withAccessModes("ReadWriteOnce")
      .withResources(dataStorageConfig.getResourceRequirements())
      .withStorageClassName(dataStorageConfig.getStorageClass());
  final Map<String, String> labels = labelFactory.clusterLabels(cluster);
  final Map<String, String> podLabels = labelFactory.statefulSetPodLabels(cluster);

  // Discover volume pairs once; the volume itself feeds the pod template while
  // the pair's source resource is emitted as a dependency below.
  Map<String, VolumePair> availableVolumesPairs = volumeDiscoverer.discoverVolumes(context);
  Map<String, Volume> availableVolumes = availableVolumesPairs.entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, vp -> vp.getValue().getVolume()));
  final PodTemplateResult buildPodTemplate = podTemplateSpecFactory.getPodTemplateSpec(
      ImmutableDistributedLogsContainerContext.builder()
          .distributedLogsContext(context)
          .availableVolumes(availableVolumes)
          .dataVolumeName(dataName(cluster))
          .build());

  StatefulSet clusterStatefulSet = new StatefulSetBuilder()
      .withNewMetadata()
      .withNamespace(namespace)
      .withName(name)
      .withLabels(labels)
      .endMetadata()
      .withNewSpec()
      .withReplicas(1)
      .withSelector(new LabelSelectorBuilder().addToMatchLabels(podLabels).build())
      // OnDelete: pods are only replaced when deleted, never rolled automatically.
      .withUpdateStrategy(new StatefulSetUpdateStrategyBuilder().withType("OnDelete").build())
      .withServiceName(name)
      .withTemplate(buildPodTemplate.getSpec())
      // The Stream.of(Stream.of(...)).flatMap(...).toArray(...) wrapping was
      // redundant; the single data volume claim is passed to the varargs setter.
      .withVolumeClaimTemplates(new PersistentVolumeClaimBuilder()
          .withNewMetadata()
          .withNamespace(namespace)
          .withName(dataName(cluster))
          .withLabels(labels)
          .endMetadata()
          .withSpec(volumeClaimSpec.build())
          .build())
      .endSpec()
      .build();

  // Emit source resources only for volumes actually claimed by the pod template.
  var volumeDependencies = buildPodTemplate.claimedVolumes().stream()
      .map(availableVolumesPairs::get)
      .filter(Objects::nonNull)
      .map(VolumePair::getSource)
      .filter(Optional::isPresent)
      .map(Optional::get);
  return Stream.concat(Stream.of(clusterStatefulSet), volumeDependencies);
}
Aggregations