use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
Example from the class StackUpdater, method doUpdateStackStatus.
/**
 * Updates the persisted status of the stack identified by {@code stackId} and keeps the
 * in-memory poll-group state store in sync with the new status.
 *
 * @param stackId           id of the stack to update
 * @param newStatus         target high-level status
 * @param newDetailedStatus target detailed status stored alongside the high-level status
 * @param statusReason      human-readable reason persisted with the new status
 * @return the (possibly re-saved) stack entity
 */
private Stack doUpdateStackStatus(Long stackId, Status newStatus, DetailedStackStatus newDetailedStatus, String statusReason) {
Stack stack = stackService.getByIdWithTransaction(stackId);
StackStatus actualStackStatus = stack.getStackStatus();
LOGGER.info("Update stack status from: {}/{} to: {}/{} stack: {} reason: {}", actualStackStatus.getStatus(), actualStackStatus.getDetailedStackStatus(), newStatus, newDetailedStatus, stackId, statusReason);
// NOTE(review): the skip check compares only the high-level status — a call with the same
// Status but a different DetailedStackStatus or statusReason is silently skipped. Confirm
// this is intentional.
if (actualStackStatus.getStatus().equals(newStatus)) {
LOGGER.debug("New status is the same as previous status {}/{}, skip status update.", actualStackStatus.getStatus(), actualStackStatus.getDetailedStackStatus());
return stack;
} else if (!stack.isDeleteCompleted()) {
stack.setStackStatus(new StackStatus(stack, newStatus, statusReason, newDetailedStatus));
Cluster cluster = stack.getCluster();
// Keep the in-memory state store consistent with the persisted status: removable
// statuses drop the stack/cluster entries, all others (re)register the poll group.
if (newStatus.isRemovableStatus()) {
InMemoryStateStore.deleteStack(stackId);
if (cluster != null) {
InMemoryStateStore.deleteCluster(cluster.getId());
}
} else {
InMemoryStateStore.putStack(stackId, statusToPollGroupConverter.convert(newStatus));
if (cluster != null) {
InMemoryStateStore.putCluster(cluster.getId(), statusToPollGroupConverter.convert(newStatus));
}
}
// Persist the stack first so the deprecated cluster status and the usage event are
// recorded against the saved entity/previous status pair.
stack = stackService.save(stack);
saveDeprecatedClusterStatus(statusReason, stack, newStatus);
usageLoggingUtil.logClusterStatusChangeUsageEvent(actualStackStatus.getStatus(), newStatus, cluster);
} else {
// A stack in DELETE_COMPLETED state is terminal; never resurrect it with a new status.
LOGGER.info("Stack is in DELETE_COMPLETED status, cannot update status.");
}
return stack;
}
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
Example from the class IdBrokerService, method generateIdBrokerSignKey.
/**
 * Ensures that IdBroker sign keys exist for the cluster belonging to the given stack.
 * Generates and persists a new {@link IdBroker} entity only when none is stored yet for
 * the cluster; otherwise the call is a no-op apart from a debug log line.
 *
 * @param stackId id of the stack whose cluster needs IdBroker sign keys
 */
public void generateIdBrokerSignKey(Long stackId) {
    Cluster cluster = clusterService.findOneByStackIdOrNotFoundError(stackId);
    IdBroker idBroker = repository.findByClusterId(cluster.getId());
    if (idBroker == null) {
        LOGGER.debug("Generate IdBroker sign keys for the cluster");
        idBroker = idBrokerConverterUtil.generateIdBrokerSignKeys(cluster);
        repository.save(idBroker);
    } else {
        // Fixed typo in log message: "keysh" -> "keys".
        LOGGER.debug("IdBroker sign keys have already been created");
    }
}
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
Example from the class GatewayPublicEndpointManagementService, method updateDnsEntryForCluster.
/**
 * Registers a DNS entry for the stack's gateway and, when registration succeeds,
 * stores the resulting FQDN on the cluster entity.
 *
 * @param stack the stack whose cluster DNS entry should be updated
 * @return the registered FQDN, or {@code null} when no entry was created
 */
public String updateDnsEntryForCluster(Stack stack) {
    String registeredFqdn = updateDnsEntry(stack, null);
    if (registeredFqdn == null) {
        return null;
    }
    Cluster clusterToUpdate = stack.getCluster();
    clusterToUpdate.setFqdn(registeredFqdn);
    clusterService.save(clusterToUpdate);
    LOGGER.info("The '{}' domain name has been generated, registered through PEM service and saved for the cluster.", registeredFqdn);
    return registeredFqdn;
}
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
Example from the class RedbeamsDbCertificateProvider, method getDatalakeDatabaseRootCerts.
/**
 * Collects the database root certificates of the datalake that backs the given datahub
 * stack and adds them to {@code result}. No-op for non-WORKLOAD stacks or when no
 * datalake can be resolved for the stack.
 *
 * @param stack  the (datahub) stack whose datalake certificates are requested
 * @param result mutable set the discovered root certificates are added to
 */
private void getDatalakeDatabaseRootCerts(Stack stack, Set<String> result) {
    // Only datahub (WORKLOAD) stacks have a backing datalake to look up.
    if (StackType.WORKLOAD.equals(stack.getType())) {
        Optional<Stack> datalakeStack = datalakeService.getDatalakeStackByDatahubStack(stack);
        LOGGER.debug("Gathering datalake and its database if exists for the cluster");
        if (datalakeStack.isPresent()) {
            Cluster dataLakeCluster = datalakeStack.get().getCluster();
            result.addAll(getDatabaseRootCerts(dataLakeCluster));
        } else {
            // Fixed ungrammatical log message ("There is no datalake resource could be found").
            LOGGER.info("No datalake resource could be found for the cluster.");
        }
    }
}
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
Example from the class MountDisks, method mountDisks.
/**
 * Formats and mounts the attached disks on the given nodes via the host orchestrator and
 * persists the reported uuid/fstab data for every freshly CREATED instance.
 *
 * @param stack             the stack whose nodes are being mounted
 * @param nodesWithDiskData nodes that carry disk data to be formatted and mounted
 * @param allNodes          all nodes of the stack, passed to the orchestrator
 * @throws CloudbreakException when the orchestrator fails to format/mount the disks
 */
private void mountDisks(Stack stack, Set<Node> nodesWithDiskData, Set<Node> allNodes) throws CloudbreakException {
    Cluster cluster = stack.getCluster();
    try {
        List<GatewayConfig> gatewayConfigs = gatewayConfigService.getAllGatewayConfigs(stack);
        ExitCriteriaModel exitModel = clusterDeletionBasedModel(stack.getId(), cluster.getId());
        // Pick the orchestrator call depending on whether the stack was created after the
        // mount optimisation.
        Map<String, Map<String, String>> mountInfoByHost = isCbVersionPostOptimisation(stack)
                ? hostOrchestrator.formatAndMountDisksOnNodes(stack, gatewayConfigs, nodesWithDiskData, allNodes, exitModel, stack.getPlatformVariant())
                : hostOrchestrator.formatAndMountDisksOnNodesLegacy(gatewayConfigs, nodesWithDiskData, allNodes, exitModel, stack.getPlatformVariant());
        for (Map.Entry<String, Map<String, String>> hostMountInfo : mountInfoByHost.entrySet()) {
            String hostname = hostMountInfo.getKey();
            Map<String, String> mountResult = hostMountInfo.getValue();
            // Only persist mount data for instances that match the host's FQDN and are in
            // CREATED state.
            Optional<String> matchingInstanceId = stack.getInstanceMetaDataAsList().stream()
                    .filter(metaData -> hostname.equals(metaData.getDiscoveryFQDN()))
                    .filter(metaData -> InstanceStatus.CREATED.equals(metaData.getInstanceStatus()))
                    .map(InstanceMetaData::getInstanceId)
                    .findFirst();
            if (matchingInstanceId.isPresent()) {
                String uuids = mountResult.getOrDefault("uuids", "");
                String fstab = mountResult.getOrDefault("fstab", "");
                if (!StringUtils.isEmpty(uuids) && !StringUtils.isEmpty(fstab)) {
                    persistUuidAndFstab(stack, matchingInstanceId.get(), hostname, uuids, fstab);
                }
            }
        }
    } catch (CloudbreakOrchestratorFailedException e) {
        LOGGER.error("Failed to mount disks", e);
        throw new CloudbreakSecuritySetupException(e);
    }
}
Aggregations