Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks:
the class StackUpscaleActions, method cleanupFreeIpaAction.
@Bean(name = "CLEANUP_FREEIPA_UPSCALE_STATE")
public Action<?, ?> cleanupFreeIpaAction() {
    return new AbstractStackUpscaleAction<>(ExtendHostMetadataResult.class) {
        @Inject
        private InstanceMetaDataService instanceMetaDataService;

        /**
         * Collects the FQDNs belonging to the upscale-candidate IP addresses and
         * fires a CleanupFreeIpaEvent for them.
         */
        @Override
        protected void doExecute(StackScalingFlowContext context, ExtendHostMetadataResult payload, Map<Object, Object> variables) {
            // Live (not terminated, not zombie) instances of the stack.
            Set<InstanceMetaData> liveInstances = instanceMetaDataService.findNotTerminatedAndNotZombieForStack(context.getStack().getId());
            // Private IPs of the newly added (upscale candidate) nodes.
            Set<String> candidateIps = payload.getRequest().getUpscaleCandidateAddresses();
            // Resolve candidate IPs to discovery FQDNs; instances without an FQDN are skipped.
            Set<String> candidateFqdns = liveInstances.stream()
                    .filter(instance -> candidateIps.contains(instance.getPrivateIp()))
                    .filter(instance -> instance.getDiscoveryFQDN() != null)
                    .map(InstanceMetaData::getDiscoveryFQDN)
                    .collect(Collectors.toSet());
            CleanupFreeIpaEvent event = new CleanupFreeIpaEvent(context.getStack().getId(), candidateFqdns, candidateIps, context.isRepair());
            sendEvent(context, event);
        }
    };
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks:
the class CollectDownscaleCandidatesHandler, method collectCandidates.
/**
 * Selects the instances to remove for each requested host group and returns their private ids.
 * For every host group in the request, asks the cluster decommission service to pick
 * {@code adjustment} candidates from the group's alive instances, logs and fires an event
 * with the chosen hosts, and accumulates their private ids.
 *
 * @param request the downscale request carrying host group names and adjustment counts
 * @param stack   the stack whose cluster is being downscaled
 * @return private ids of all collected downscale candidates
 * @throws CloudbreakException if candidate collection fails
 */
private Set<Long> collectCandidates(CollectDownscaleCandidatesRequest request, Stack stack) throws CloudbreakException {
    LOGGER.debug("Collecting candidates for downscale based on [{}] and stack CRN [{}].", request, stack.getResourceCrn());
    Set<Long> candidatePrivateIds = new HashSet<>();
    for (Map.Entry<String, Integer> adjustment : request.getHostGroupWithAdjustment().entrySet()) {
        String groupName = adjustment.getKey();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stack.getCluster().getId(), groupName)
                .orElseThrow(NotFoundException.notFound("hostgroup", groupName));
        LOGGER.debug("Host group has been found for cluster! It's name: {}", hostGroup.getName());
        // Only alive instances of the group are eligible for downscale selection.
        Set<InstanceMetaData> aliveInstances = new HashSet<>(
                instanceMetaDataService.findAliveInstancesInInstanceGroup(hostGroup.getInstanceGroup().getId()));
        Set<InstanceMetaData> selected = clusterApiConnectors.getConnector(stack)
                .clusterDecomissionService()
                .collectDownscaleCandidates(hostGroup, adjustment.getValue(), aliveInstances);
        // Describe each candidate by FQDN when known, otherwise by its private id.
        String selectedHostsText = selected.stream()
                .map(instance -> instance.getDiscoveryFQDN() != null
                        ? "FQDN: " + instance.getDiscoveryFQDN()
                        : "Private id: " + instance.getPrivateId())
                .collect(Collectors.joining(", "));
        LOGGER.debug("The following hosts has been collected as candidates for downscale: [{}]", selectedHostsText);
        flowMessageService.fireEventAndLog(stack.getId(), AVAILABLE.name(), STACK_SELECT_FOR_DOWNSCALE, selectedHostsText);
        selected.forEach(instance -> candidatePrivateIds.add(instance.getPrivateId()));
    }
    return candidatePrivateIds;
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks:
the class DecommissionHandler, method accept.
/**
 * Handles a decommission request: resolves the hosts to remove, runs pre-termination
 * recipes, removes the hosts from the cluster manager (in bulk when entitled and the
 * CM version allows it), and notifies the flow with the resulting DecommissionResult.
 * On a tolerable error during a forced, non-repair decommission the failure is logged
 * and the flow continues with a success result.
 */
@Override
public void accept(Event<DecommissionRequest> event) {
    DecommissionRequest request = event.getData();
    boolean forced = request.getDetails() != null && request.getDetails().isForced();
    Set<String> hostNames = Collections.emptySet();
    DecommissionResult result;
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(request.getResourceId());
        hostNames = getHostNamesForPrivateIds(request, stack);
        ClusterDecomissionService decomissionService = getClusterDecomissionService(stack);
        // Map of host name -> metadata for every removable host across the requested groups.
        Map<String, InstanceMetaData> hostsToRemove = new HashMap<>();
        for (String hostGroup : request.getHostGroupNames()) {
            hostsToRemove.putAll(getRemovableHosts(decomissionService, stack, hostGroup, hostNames));
        }
        updateInstancesToDeleteRequested(hostsToRemove.values());
        if (!hostsToRemove.isEmpty()) {
            executePreTerminationRecipes(stack, hostsToRemove.keySet());
        }
        Optional<String> runtimeVersion = runtimeVersionService.getRuntimeVersion(stack.getCluster().getId());
        // Bulk removal needs both the account entitlement and a CM version that supports it.
        boolean bulkRemovalAllowed =
                entitlementService.bulkHostsRemovalFromCMSupported(Crn.fromString(stack.getResourceCrn()).getAccountId())
                        && CMRepositoryVersionUtil.isCmBulkHostsRemovalAllowed(runtimeVersion);
        result = bulkRemovalAllowed
                ? bulkHostsRemoval(request, hostNames, forced, stack, decomissionService, hostsToRemove)
                : singleHostsRemoval(request, hostNames, forced, stack, decomissionService, hostsToRemove);
    } catch (Exception e) {
        LOGGER.info("Exception occurred during decommission.", e);
        // Forced implies request.getDetails() != null, so the isRepair() call is safe here.
        if (isTolerableError(e) && forced && !request.getDetails().isRepair()) {
            eventService.fireCloudbreakEvent(request.getResourceId(), UPDATE_IN_PROGRESS.name(),
                    CLUSTER_DECOMMISSION_FAILED_FORCE_DELETE_CONTINUE, Collections.singletonList(e.getMessage()));
            result = new DecommissionResult(request, hostNames);
        } else {
            result = new DecommissionResult(e.getMessage(), e, request, hostNames, UNKNOWN_ERROR_PHASE);
        }
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks:
the class UnhealthyInstancesDetectionHandler, method accept.
/**
 * Detects unhealthy instances for the stack in the request: selects candidate
 * unhealthy instances, finalizes which of them are truly unhealthy, and notifies
 * the flow with an UnhealthyInstancesDetectionResult (a failure result on error).
 */
@Override
public void accept(Event<UnhealthyInstancesDetectionRequest> event) {
    UnhealthyInstancesDetectionRequest request = event.getData();
    UnhealthyInstancesDetectionResult result;
    Long stackId = request.getResourceId();
    Stack stack = stackService.getByIdWithTransaction(stackId);
    try {
        Set<InstanceMetaData> candidateUnhealthyInstances = unhealthyInstanceSelector.selectCandidateUnhealthyInstances(stack.getId());
        if (candidateUnhealthyInstances.isEmpty()) {
            // Nothing to examine; report an empty result rather than querying statuses.
            result = new UnhealthyInstancesDetectionResult(request, Collections.emptySet());
        } else {
            Set<String> unhealthyInstances = unhealthyInstancesFinalizer.finalizeUnhealthyInstances(stack, candidateUnhealthyInstances);
            result = new UnhealthyInstancesDetectionResult(request, unhealthyInstances);
        }
    } catch (RuntimeException e) {
        // Fixed typo in the user-facing failure message: "unhealty" -> "unhealthy".
        String msg = String.format("Could not get statuses for unhealthy instances: %s", e.getMessage());
        LOG.info(msg, e);
        result = new UnhealthyInstancesDetectionResult(msg, e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks:
the class StackStatusCheckerJob, method doSync.
/**
 * Synchronizes the stack's status with the cluster manager. When CM is reachable,
 * fetches extended host health and reports failed/recovered instances; otherwise
 * (or on any runtime error) falls back to a plain instance sync.
 *
 * @param stack the stack to synchronize
 */
private void doSync(Stack stack) {
    ClusterApi connector = clusterApiConnectors.getConnector(stack);
    // Live (not terminated, not zombie) instances; reused by both the healthy and fallback paths.
    Set<InstanceMetaData> runningInstances = instanceMetaDataService.findNotTerminatedAndNotZombieForStack(stack.getId());
    try {
        if (isClusterManagerRunning(stack, connector)) {
            ExtendedHostStatuses extendedHostStatuses = getExtendedHostStatuses(stack, connector);
            Map<HostName, Set<HealthCheck>> hostStatuses = extendedHostStatuses.getHostsHealth();
            // Fixed typo in the log message: "certicates" -> "certificates".
            LOGGER.debug("Cluster '{}' state check, host certificates expiring: [{}], cm running, hoststates: {}",
                    stack.getId(), extendedHostStatuses.isAnyCertExpiring(), hostStatuses);
            reportHealthAndSyncInstances(stack, runningInstances,
                    getFailedInstancesInstanceMetadata(extendedHostStatuses, runningInstances),
                    getNewHealthyHostNames(extendedHostStatuses, runningInstances),
                    extendedHostStatuses.isAnyCertExpiring());
        } else {
            syncInstances(stack, runningInstances, false);
        }
    } catch (RuntimeException e) {
        // Health reporting must not break the scheduled job; degrade to a plain sync.
        LOGGER.warn("Error during sync", e);
        syncInstances(stack, runningInstances, false);
    }
}
Aggregations