use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
the class ClusterSyncHandler method accept.
@Override
public void accept(Event<ClusterSyncRequest> event) {
    ClusterSyncRequest request = event.getData();
    ClusterSyncResult result;
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(request.getResourceId());
        Cluster cluster = clusterService.retrieveClusterByStackIdWithoutAuth(request.getResourceId()).orElse(null);
        clusterStatusUpdater.updateClusterStatus(stack, cluster);
        if (cluster != null && (stack.isAvailable() || stack.isMaintenanceModeEnabled())) {
            instanceMetadataUpdater.updatePackageVersionsOnAllInstances(stack.getId());
        }
        result = new ClusterSyncResult(request);
    } catch (Exception e) {
        result = new ClusterSyncResult(e.getMessage(), e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
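The handler above follows the reactor convention used throughout these snippets: every request event is answered with exactly one result event, even on failure, so the flow engine can always advance. Below is a minimal sketch of a hypothetical handler built on the same shape; ExampleRequest, ExampleResult, and the body of the try block are illustrative assumptions, not Cloudbreak code.

// Hypothetical handler following the same request -> result pattern.
public class ExampleSyncHandler implements EventHandler<ExampleRequest> {

    @Inject
    private EventBus eventBus;

    @Override
    public String selector() {
        // Derive the selector from the request class, as Cloudbreak handlers commonly do.
        return EventSelectorUtil.selector(ExampleRequest.class);
    }

    @Override
    public void accept(Event<ExampleRequest> event) {
        ExampleRequest request = event.getData();
        ExampleResult result;
        try {
            // ... do the actual synchronization work here ...
            result = new ExampleResult(request);
        } catch (Exception e) {
            // Failures are reported back as a result event rather than rethrown.
            result = new ExampleResult(e.getMessage(), e, request);
        }
        eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
    }
}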
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
the class StopStartDownscaleDecommissionViaCMHandler method doAccept.
@Override
protected Selectable doAccept(HandlerEvent<StopStartDownscaleDecommissionViaCMRequest> event) {
    StopStartDownscaleDecommissionViaCMRequest request = event.getData();
    LOGGER.info("StopStartDownscaleDecommissionViaCMHandler for: {}, {}", event.getData().getResourceId(), event.getData());
    try {
        Stack stack = stackService.getByIdWithLists(request.getResourceId());
        Cluster cluster = stack.getCluster();
        ClusterDecomissionService clusterDecomissionService = clusterApiConnectors.getConnector(stack).clusterDecomissionService();
        Set<String> hostNames = getHostNamesForPrivateIds(request.getInstanceIdsToDecommission(), stack);
        LOGGER.debug("Attempting to decommission hosts. count={}, hostnames={}", hostNames.size(), hostNames);
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(cluster.getId(), request.getHostGroupName())
                .orElseThrow(NotFoundException.notFound("hostgroup", request.getHostGroupName()));
        Map<String, InstanceMetaData> hostsToRemove = clusterDecomissionService.collectHostsToRemove(hostGroup, hostNames);
        List<String> missingHostsInCm = Collections.emptyList();
        if (hostNames.size() != hostsToRemove.size()) {
            missingHostsInCm = hostNames.stream()
                    .filter(h -> !hostsToRemove.containsKey(h))
                    .collect(Collectors.toList());
            LOGGER.info("Found fewer instances in CM to decommission, as compared to initial ask. foundCount={}, initialCount={}, missingHostsInCm={}",
                    hostsToRemove.size(), hostNames.size(), missingHostsInCm);
        }
        // TODO CB-14929: Potentially put the nodes into maintenance mode before decommissioning?
        // TODO CB-15132: Eventually, try parsing the results of the CM decommission, and see if a partial decommission went through in the
        //  timebound specified.
        Set<String> decommissionedHostNames = Collections.emptySet();
        if (hostsToRemove.size() > 0) {
            decommissionedHostNames = clusterDecomissionService.decommissionClusterNodesStopStart(hostsToRemove, POLL_FOR_10_MINUTES);
            updateInstanceStatuses(hostsToRemove, decommissionedHostNames, InstanceStatus.DECOMMISSIONED, "decommission requested for instances");
        }
        // This doesn't handle failures. It handles scenarios where CM list APIs don't have the necessary hosts available.
        List<String> allMissingHostnames = null;
        if (missingHostsInCm.size() > 0) {
            allMissingHostnames = new LinkedList<>(missingHostsInCm);
        }
        if (hostsToRemove.size() != decommissionedHostNames.size()) {
            Set<String> finalDecommissionedHostnames = decommissionedHostNames;
            List<String> additionalMissingDecommissionHostnames = hostsToRemove.keySet().stream()
                    .filter(h -> !finalDecommissionedHostnames.contains(h))
                    .collect(Collectors.toList());
            LOGGER.info("Decommissioned fewer instances than requested. decommissionedCount={}, expectedCount={}, initialCount={}, notDecommissioned=[{}]",
                    decommissionedHostNames.size(), hostsToRemove.size(), hostNames.size(), additionalMissingDecommissionHostnames);
            if (allMissingHostnames == null) {
                allMissingHostnames = new LinkedList<>();
            }
            allMissingHostnames.addAll(additionalMissingDecommissionHostnames);
        }
        LOGGER.info("hostsDecommissioned: count={}, hostNames={}", decommissionedHostNames.size(), decommissionedHostNames);
        if (decommissionedHostNames.size() > 0) {
            LOGGER.debug("Attempting to put decommissioned hosts into maintenance mode. count={}", decommissionedHostNames.size());
            flowMessageService.fireEventAndLog(stack.getId(), UPDATE_IN_PROGRESS.name(), CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTERINGCMMAINTMODE,
                    String.valueOf(decommissionedHostNames.size()));
            clusterDecomissionService.enterMaintenanceMode(decommissionedHostNames);
            flowMessageService.fireEventAndLog(stack.getId(), UPDATE_IN_PROGRESS.name(), CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTEREDCMMAINTMODE,
                    String.valueOf(decommissionedHostNames.size()));
            LOGGER.debug("Successfully put decommissioned hosts into maintenance mode. count={}", decommissionedHostNames.size());
        } else {
            LOGGER.debug("No nodes decommissioned, hence no nodes being put into maintenance mode");
        }
        return new StopStartDownscaleDecommissionViaCMResult(request, decommissionedHostNames, allMissingHostnames);
    } catch (Exception e) {
        // TODO CB-15132: This can be improved based on where and when the Exception occurred to potentially rollback certain aspects.
        // ClusterClientInitException is one which is explicitly thrown.
        String message = "Failed while attempting to decommission nodes via CM";
        LOGGER.error(message, e);
        return new StopStartDownscaleDecommissionViaCMResult(message, e, request);
    }
}
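getHostNamesForPrivateIds and updateInstanceStatuses are called above but not shown. A plausible sketch of the first, assuming the request carries private ids and that InstanceMetaData exposes getPrivateId() and getDiscoveryFQDN() as elsewhere on this page; the real implementation may differ.

// Hypothetical sketch: map the private ids requested for downscale to the
// discovery FQDNs that CM knows the hosts by. Method names here are
// assumptions inferred from the surrounding snippets.
private Set<String> getHostNamesForPrivateIds(Set<Long> privateIds, Stack stack) {
    return stack.getNotDeletedInstanceMetaDataSet().stream()
            .filter(im -> privateIds.contains(im.getPrivateId()))
            .map(InstanceMetaData::getDiscoveryFQDN)
            .filter(Objects::nonNull)
            .collect(Collectors.toSet());
}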
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
the class RestartClusterManagerServerHandler method restartCMServer.
private void restartCMServer(Stack stack) throws Exception {
    Cluster cluster = stack.getCluster();
    InstanceMetaData gatewayInstance = stack.getPrimaryGatewayInstance();
    GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, gatewayInstance, cluster.hasGateway());
    Set<String> gatewayFQDN = Collections.singleton(gatewayInstance.getDiscoveryFQDN());
    ExitCriteriaModel exitModel = ClusterDeletionBasedExitCriteriaModel.clusterDeletionBasedModel(stack.getId(), cluster.getId());
    Set<Node> nodes = stackUtil.collectReachableNodes(stack);
    Set<String> agentTargets = nodes.stream().map(Node::getHostname).collect(Collectors.toSet());
    hostOrchestrator.updateAgentCertDirectoryPermission(gatewayConfig, agentTargets, nodes, exitModel);
    hostOrchestrator.restartClusterManagerOnMaster(gatewayConfig, gatewayFQDN, nodes, exitModel);
}
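A private method like this is typically invoked from the handler's doAccept, wrapped in the same try/catch-to-result pattern seen in the other snippets. A hedged sketch of such a caller; the request and result class names are illustrative assumptions, not confirmed Cloudbreak classes.

// Hypothetical caller sketch for restartCMServer.
@Override
protected Selectable doAccept(HandlerEvent<RestartClusterManagerServerRequest> event) {
    Long stackId = event.getData().getResourceId();
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(stackId);
        restartCMServer(stack);
        return new RestartClusterManagerServerSuccess(stackId);
    } catch (Exception e) {
        LOGGER.error("Failed to restart the Cloudera Manager server", e);
        return new RestartClusterManagerServerFailed(stackId, e);
    }
}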
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
the class DatabaseBackupHandler method doAccept.
@Override
protected Selectable doAccept(HandlerEvent<DatabaseBackupRequest> event) {
    DatabaseBackupRequest request = event.getData();
    Selectable result;
    Long stackId = request.getResourceId();
    LOGGER.debug("Backing up database on stack {}, backup id {}", stackId, request.getBackupId());
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(stackId);
        Cluster cluster = stack.getCluster();
        InstanceMetaData gatewayInstance = stack.getPrimaryGatewayInstance();
        GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, gatewayInstance, cluster.hasGateway());
        Set<String> gatewayFQDN = Collections.singleton(gatewayInstance.getDiscoveryFQDN());
        ExitCriteriaModel exitModel = ClusterDeletionBasedExitCriteriaModel.clusterDeletionBasedModel(stackId, cluster.getId());
        String rangerAdminGroup = rangerVirtualGroupService.getRangerVirtualGroup(stack);
        SaltConfig saltConfig = saltConfigGenerator.createSaltConfig(request.getBackupLocation(), request.getBackupId(), rangerAdminGroup,
                request.getCloseConnections(), stack);
        hostOrchestrator.backupDatabase(gatewayConfig, gatewayFQDN, stackUtil.collectReachableNodes(stack), saltConfig, exitModel);
        result = new DatabaseBackupSuccess(stackId);
    } catch (Exception e) {
        LOGGER.error("Database backup event failed", e);
        result = new DatabaseBackupFailedEvent(stackId, e, DetailedStackStatus.DATABASE_BACKUP_FAILED);
    }
    return result;
}
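saltConfigGenerator.createSaltConfig bundles the backup parameters into salt pillar data for the orchestrator call above. A minimal sketch of what such a generator might look like; the pillar path and key names are invented for illustration and are not Cloudbreak's actual pillar layout.

// Hypothetical pillar construction for the database backup run.
public SaltConfig createSaltConfig(String backupLocation, String backupId, String rangerAdminGroup, boolean closeConnections, Stack stack) {
    Map<String, Object> conf = new HashMap<>();
    conf.put("backup_location", backupLocation);      // assumed key
    conf.put("backup_id", backupId);                  // assumed key
    conf.put("ranger_admin_group", rangerAdminGroup); // assumed key
    conf.put("close_connections", closeConnections);  // assumed key
    Map<String, SaltPillarProperties> pillars = new HashMap<>();
    pillars.put("backup", new SaltPillarProperties("/backup/init.sls", Map.of("backup", conf)));
    return new SaltConfig(pillars);
}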
use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
the class LoadBalancerSANProvider method getLoadBalancerSAN.
public Optional<String> getLoadBalancerSAN(Stack stack) {
    checkNotNull(stack);
    checkNotNull(stack.getCluster());
    Cluster cluster = stack.getCluster();
    CmTemplateProcessor cmTemplateProcessor = new CmTemplateProcessor(cluster.getBlueprint().getBlueprintText());
    String cdhVersion = cmTemplateProcessor.getStackVersion();
    if (isVersionNewerOrEqualThanLimited(cdhVersion, CLOUDERA_STACK_VERSION_7_2_11)) {
        Set<LoadBalancer> loadBalancers = loadBalancerPersistenceService.findByStackId(stack.getId());
        if (!loadBalancers.isEmpty()) {
            Optional<LoadBalancer> loadBalancer = loadBalancerConfigService.selectLoadBalancerForFrontend(loadBalancers, LoadBalancerType.PUBLIC);
            return loadBalancer.flatMap(this::getBestSANForLB);
        }
    }
    return Optional.empty();
}
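getBestSANForLB is referenced above but not shown. A plausible sketch, assuming the LoadBalancer entity exposes an FQDN, a cloud DNS name, and an IP, and that the preference order is FQDN, then DNS name, then IP; both the getters and the ordering are assumptions, not confirmed by the source.

// Hypothetical sketch: pick the most stable name available on the load balancer
// to use as the certificate SAN. Uses org.apache.commons.lang3.StringUtils.
private Optional<String> getBestSANForLB(LoadBalancer loadBalancer) {
    if (StringUtils.isNotBlank(loadBalancer.getFqdn())) {
        return Optional.of(loadBalancer.getFqdn());
    }
    if (StringUtils.isNotBlank(loadBalancer.getDns())) {
        return Optional.of(loadBalancer.getDns());
    }
    return Optional.ofNullable(loadBalancer.getIp());
}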