Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class ClusterSyncHandler, method accept.
@Override
public void accept(Event<ClusterSyncRequest> event) {
    ClusterSyncRequest request = event.getData();
    ClusterSyncResult result;
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(request.getResourceId());
        Cluster cluster = clusterService.retrieveClusterByStackIdWithoutAuth(request.getResourceId()).orElse(null);
        clusterStatusUpdater.updateClusterStatus(stack, cluster);
        if (cluster != null && (stack.isAvailable() || stack.isMaintenanceModeEnabled())) {
            instanceMetadataUpdater.updatePackageVersionsOnAllInstances(stack.getId());
        }
        result = new ClusterSyncResult(request);
    } catch (Exception e) {
        result = new ClusterSyncResult(e.getMessage(), e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
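The handler follows a request/result round trip: load the Stack and Cluster for the request's resource id, update the cluster status, refresh package versions when the stack is available or in maintenance mode, and always reply on result.selector() with the original event headers, even on failure. The following self-contained sketch illustrates only that reply pattern with a tiny in-memory bus; every name in it (SelectorReplySketch, SyncRequest, SyncResult, notifyBus) is hypothetical and not part of cloudbreak or Reactor.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

// Minimal sketch of the selector-based request/result pattern; every type here
// is a hypothetical stand-in, not a cloudbreak or Reactor class.
public class SelectorReplySketch {

    record Event<T>(Map<String, Object> headers, T data) { }

    record SyncRequest(Long resourceId) { }

    record SyncResult(SyncRequest request, String status) {
        // The result decides which selector the reply is published on.
        String selector() {
            return "SYNC_RESULT_" + status;
        }
    }

    // Tiny in-memory "event bus": selector string -> listener.
    private static final Map<String, Consumer<Event<?>>> BUS = new ConcurrentHashMap<>();

    static void notifyBus(String selector, Event<?> event) {
        Consumer<Event<?>> listener = BUS.get(selector);
        if (listener != null) {
            listener.accept(event);
        }
    }

    static void accept(Event<SyncRequest> event) {
        SyncRequest request = event.data();
        SyncResult result;
        try {
            // ... load stack and cluster, update status, refresh packages ...
            result = new SyncResult(request, "FINISHED");
        } catch (RuntimeException e) {
            result = new SyncResult(request, "FAILED");
        }
        // Reply on the result selector, preserving the incoming headers.
        notifyBus(result.selector(), new Event<>(event.headers(), result));
    }

    public static void main(String[] args) {
        BUS.put("SYNC_RESULT_FINISHED", e -> System.out.println("sync finished for " + e.data()));
        accept(new Event<>(Map.of("FLOW_ID", "flow-1"), new SyncRequest(42L)));
    }
}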
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class CollectDownscaleCandidatesHandler, method collectCandidates.
private Set<Long> collectCandidates(CollectDownscaleCandidatesRequest request, Stack stack) throws CloudbreakException {
    Set<Long> privateIds = new HashSet<>();
    LOGGER.debug("Collecting candidates for downscale based on [{}] and stack CRN [{}].", request, stack.getResourceCrn());
    for (Map.Entry<String, Integer> entry : request.getHostGroupWithAdjustment().entrySet()) {
        String hostGroupName = entry.getKey();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stack.getCluster().getId(), hostGroupName)
                .orElseThrow(NotFoundException.notFound("hostgroup", hostGroupName));
        LOGGER.debug("Host group has been found for cluster! Its name: {}", hostGroup.getName());
        List<InstanceMetaData> metaDataForInstanceGroup =
                instanceMetaDataService.findAliveInstancesInInstanceGroup(hostGroup.getInstanceGroup().getId());
        Set<InstanceMetaData> collectedCandidates = clusterApiConnectors.getConnector(stack)
                .clusterDecomissionService()
                .collectDownscaleCandidates(hostGroup, entry.getValue(), new HashSet<>(metaDataForInstanceGroup));
        String collectedHostsAsString = collectedCandidates.stream()
                .map(instanceMetaData -> instanceMetaData.getDiscoveryFQDN() != null
                        ? "FQDN: " + instanceMetaData.getDiscoveryFQDN()
                        : "Private id: " + instanceMetaData.getPrivateId())
                .collect(Collectors.joining(", "));
        LOGGER.debug("The following hosts have been collected as candidates for downscale: [{}]", collectedHostsAsString);
        flowMessageService.fireEventAndLog(stack.getId(), AVAILABLE.name(), STACK_SELECT_FOR_DOWNSCALE, collectedHostsAsString);
        privateIds.addAll(collectedCandidates.stream().map(InstanceMetaData::getPrivateId).collect(Collectors.toSet()));
    }
    return privateIds;
}
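Each host-group iteration reduces to: ask the cluster decommission service for candidate nodes, describe them by FQDN (falling back to private id) for logging, and keep only the private ids for the flow. Below is a stand-alone sketch of that mapping step, with a hypothetical NodeMeta record standing in for InstanceMetaData.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical stand-in for InstanceMetaData: only the two fields the
// candidate-collection loop actually reads.
record NodeMeta(Long privateId, String discoveryFqdn) { }

public class DownscaleCandidateSketch {

    // Label each candidate by FQDN when known, otherwise by private id,
    // mirroring how the handler builds its debug message.
    static String describe(List<NodeMeta> candidates) {
        return candidates.stream()
                .map(n -> n.discoveryFqdn() != null
                        ? "FQDN: " + n.discoveryFqdn()
                        : "Private id: " + n.privateId())
                .collect(Collectors.joining(", "));
    }

    // The value the downscale flow ultimately cares about: the set of private ids.
    static Set<Long> privateIds(List<NodeMeta> candidates) {
        return candidates.stream().map(NodeMeta::privateId).collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        List<NodeMeta> candidates = List.of(
                new NodeMeta(3L, "worker-3.example.internal"),
                new NodeMeta(7L, null));
        System.out.println(describe(candidates));   // FQDN: worker-3.example.internal, Private id: 7
        System.out.println(privateIds(candidates)); // [3, 7]
    }
}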
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class DecommissionHandler, method accept.
@Override
public void accept(Event<DecommissionRequest> event) {
    DecommissionRequest request = event.getData();
    DecommissionResult result;
    Set<String> hostNames = Collections.emptySet();
    boolean forced = request.getDetails() != null && request.getDetails().isForced();
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(request.getResourceId());
        hostNames = getHostNamesForPrivateIds(request, stack);
        ClusterDecomissionService clusterDecomissionService = getClusterDecomissionService(stack);
        Map<String, InstanceMetaData> hostsToRemove = new HashMap<>();
        Set<String> hostGroupNames = request.getHostGroupNames();
        for (String hostGroup : hostGroupNames) {
            hostsToRemove.putAll(getRemovableHosts(clusterDecomissionService, stack, hostGroup, hostNames));
        }
        updateInstancesToDeleteRequested(hostsToRemove.values());
        if (!hostsToRemove.isEmpty()) {
            executePreTerminationRecipes(stack, hostsToRemove.keySet());
        }
        Optional<String> runtimeVersion = runtimeVersionService.getRuntimeVersion(stack.getCluster().getId());
        if (entitlementService.bulkHostsRemovalFromCMSupported(Crn.fromString(stack.getResourceCrn()).getAccountId())
                && CMRepositoryVersionUtil.isCmBulkHostsRemovalAllowed(runtimeVersion)) {
            result = bulkHostsRemoval(request, hostNames, forced, stack, clusterDecomissionService, hostsToRemove);
        } else {
            result = singleHostsRemoval(request, hostNames, forced, stack, clusterDecomissionService, hostsToRemove);
        }
    } catch (Exception e) {
        LOGGER.info("Exception occurred during decommission.", e);
        if (isTolerableError(e) && forced && !request.getDetails().isRepair()) {
            eventService.fireCloudbreakEvent(request.getResourceId(), UPDATE_IN_PROGRESS.name(),
                    CLUSTER_DECOMMISSION_FAILED_FORCE_DELETE_CONTINUE, Collections.singletonList(e.getMessage()));
            result = new DecommissionResult(request, hostNames);
        } else {
            result = new DecommissionResult(e.getMessage(), e, request, hostNames, UNKNOWN_ERROR_PHASE);
        }
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
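The error handling is the notable part: on a forced, non-repair downscale a tolerable error is logged and swallowed so deletion can continue, while any other failure becomes a failed DecommissionResult. A minimal sketch of that decision follows; the types and the "not found" tolerability rule are assumptions for illustration, not the cloudbreak implementation.

// Hypothetical sketch of the forced-decommission error policy; these types
// stand in for DecommissionRequest details and DecommissionResult.
public class DecommissionErrorPolicySketch {

    record Details(boolean forced, boolean repair) { }

    record Outcome(boolean success, String error) { }

    // Only some failures are safe to ignore; treating "not found" style errors
    // as tolerable here is purely for illustration.
    static boolean isTolerableError(Exception e) {
        return e.getMessage() != null && e.getMessage().contains("not found");
    }

    static Outcome handleFailure(Exception e, Details details) {
        if (isTolerableError(e) && details.forced() && !details.repair()) {
            // Forced delete continues despite the error.
            return new Outcome(true, null);
        }
        return new Outcome(false, e.getMessage());
    }

    public static void main(String[] args) {
        Exception gone = new IllegalStateException("host not found in CM");
        System.out.println(handleFailure(gone, new Details(true, false)));  // success, error swallowed
        System.out.println(handleFailure(gone, new Details(false, false))); // failed outcome
    }
}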
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class UnhealthyInstancesDetectionHandler, method accept.
@Override
public void accept(Event<UnhealthyInstancesDetectionRequest> event) {
    UnhealthyInstancesDetectionRequest request = event.getData();
    UnhealthyInstancesDetectionResult result;
    Long stackId = request.getResourceId();
    Stack stack = stackService.getByIdWithTransaction(stackId);
    try {
        Set<InstanceMetaData> candidateUnhealthyInstances = unhealthyInstanceSelector.selectCandidateUnhealthyInstances(stack.getId());
        if (candidateUnhealthyInstances.isEmpty()) {
            result = new UnhealthyInstancesDetectionResult(request, Collections.emptySet());
        } else {
            Set<String> unhealthyInstances = unhealthyInstancesFinalizer.finalizeUnhealthyInstances(stack, candidateUnhealthyInstances);
            result = new UnhealthyInstancesDetectionResult(request, unhealthyInstances);
        }
    } catch (RuntimeException e) {
        String msg = String.format("Could not get statuses for unhealthy instances: %s", e.getMessage());
        LOG.info(msg, e);
        result = new UnhealthyInstancesDetectionResult(msg, e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
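Detection is two-phased: a selector proposes candidate instances for the stack, then a finalizer confirms which of them are genuinely unhealthy, and an empty candidate set short-circuits to an empty result. The compact sketch below captures that shape with plain functions in place of the injected services; all names are hypothetical.

import java.util.Collections;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

// Two-phase unhealthy-instance detection sketch; the selector and finalizer
// are plain functions here instead of injected cloudbreak services.
public class UnhealthyDetectionSketch {

    static Set<String> detect(Long stackId,
            Function<Long, Set<String>> candidateSelector,
            Function<Set<String>, Set<String>> finalizer) {
        Set<String> candidates = candidateSelector.apply(stackId);
        if (candidates.isEmpty()) {
            // Nothing suspicious: short-circuit with an empty result.
            return Collections.emptySet();
        }
        return finalizer.apply(candidates);
    }

    public static void main(String[] args) {
        // The selector proposes instances whose health reports look suspicious...
        Function<Long, Set<String>> selector = id -> Set.of("i-001", "i-002", "i-003");
        // ...and the finalizer keeps only those the cloud provider also reports as down.
        Function<Set<String>, Set<String>> finalizer =
                candidates -> candidates.stream().filter(i -> !i.equals("i-002")).collect(Collectors.toSet());
        System.out.println(detect(1L, selector, finalizer)); // e.g. [i-001, i-003]
    }
}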
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class StackStatusCheckerJob, method executeTracedJob.
@Override
protected void executeTracedJob(JobExecutionContext context) throws JobExecutionException {
    if (flowLogService.isOtherFlowRunning(getStackId())) {
        LOGGER.debug("StackStatusCheckerJob cannot run, because flow is running for stack: {}", getStackId());
        return;
    }
    try {
        measure(() -> {
            Stack stack = stackService.get(getStackId());
            Status stackStatus = stack.getStatus();
            if (Status.getUnschedulableStatuses().contains(stackStatus)) {
                LOGGER.debug("Stack sync will be unscheduled, stack state is {}", stackStatus);
                jobService.unschedule(getLocalId());
            } else if (shouldSwitchToLongSyncJob(stackStatus, context)) {
                LOGGER.debug("Stack sync will be scheduled to long polling, stack state is {}", stackStatus);
                jobService.unschedule(getLocalId());
                jobService.scheduleLongIntervalCheck(getStackId(), StackJobAdapter.class);
            } else if (null == stackStatus || ignoredStates().contains(stackStatus)) {
                LOGGER.debug("Stack sync is skipped, stack state is {}", stackStatus);
            } else if (syncableStates().contains(stackStatus)) {
                RegionAwareInternalCrnGenerator dataHub = regionAwareInternalCrnGeneratorFactory.datahub();
                ThreadBasedUserCrnProvider.doAs(dataHub.getInternalCrnForServiceAsString(), () -> doSync(stack));
                switchToShortSyncIfNecessary(context);
            } else {
                LOGGER.warn("Unhandled stack status, {}", stackStatus);
            }
        }, LOGGER, "Check status took {} ms for stack {}.", getStackId());
    } catch (Exception e) {
        LOGGER.info("Exception during cluster state check.", e);
    }
}
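The job is essentially a scheduling decision driven by the current stack status: unschedule the check for terminal statuses, switch to long-interval polling when appropriate, skip ignored statuses, and only otherwise run the sync under the internal data hub CRN. The sketch below models that branch order; the status strings and their grouping into sets are illustrative assumptions, not the real Status enum membership used by cloudbreak.

import java.util.Set;

// Sketch of the status-driven scheduling decision; the status strings and
// sets are illustrative, not the real cloudbreak Status groupings.
public class StatusCheckDecisionSketch {

    enum Action { UNSCHEDULE, SWITCH_TO_LONG_SYNC, SKIP, SYNC, WARN_UNHANDLED }

    static final Set<String> UNSCHEDULABLE = Set.of("DELETE_COMPLETED");
    static final Set<String> IGNORED = Set.of("CREATE_IN_PROGRESS", "DELETE_IN_PROGRESS");
    static final Set<String> SYNCABLE = Set.of("AVAILABLE", "STOPPED", "UPDATE_FAILED");

    static Action decide(String status, boolean longSyncDue) {
        if (status != null && UNSCHEDULABLE.contains(status)) {
            return Action.UNSCHEDULE;
        } else if (longSyncDue) {
            return Action.SWITCH_TO_LONG_SYNC;
        } else if (status == null || IGNORED.contains(status)) {
            return Action.SKIP;
        } else if (SYNCABLE.contains(status)) {
            return Action.SYNC;
        }
        return Action.WARN_UNHANDLED;
    }

    public static void main(String[] args) {
        System.out.println(decide("AVAILABLE", false));        // SYNC
        System.out.println(decide("DELETE_COMPLETED", false)); // UNSCHEDULE
        System.out.println(decide(null, false));               // SKIP
    }
}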