
Example 11 with HostGroup

Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.

The class StopStartDownscaleDecommissionViaCMHandler, method doAccept:

@Override
protected Selectable doAccept(HandlerEvent<StopStartDownscaleDecommissionViaCMRequest> event) {
    StopStartDownscaleDecommissionViaCMRequest request = event.getData();
    LOGGER.info("StopStartDownscaleDecommissionViaCMHandler for: {}, {}", event.getData().getResourceId(), event.getData());
    try {
        Stack stack = stackService.getByIdWithLists(request.getResourceId());
        Cluster cluster = stack.getCluster();
        ClusterDecomissionService clusterDecomissionService = clusterApiConnectors.getConnector(stack).clusterDecomissionService();
        Set<String> hostNames = getHostNamesForPrivateIds(request.getInstanceIdsToDecommission(), stack);
        LOGGER.debug("Attempting to decommission hosts. count={}, hostnames={}", hostNames.size(), hostNames);
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(cluster.getId(), request.getHostGroupName()).orElseThrow(NotFoundException.notFound("hostgroup", request.getHostGroupName()));
        Map<String, InstanceMetaData> hostsToRemove = clusterDecomissionService.collectHostsToRemove(hostGroup, hostNames);
        List<String> missingHostsInCm = Collections.emptyList();
        if (hostNames.size() != hostsToRemove.size()) {
            missingHostsInCm = hostNames.stream().filter(h -> !hostsToRemove.containsKey(h)).collect(Collectors.toList());
            LOGGER.info("Found fewer instances in CM to decommission, as compared to initial ask. foundCount={}, initialCount={}, missingHostsInCm={}", hostsToRemove.size(), hostNames.size(), missingHostsInCm);
        }
        // TODO CB-14929: Potentially put the nodes into maintenance mode before decommissioning?
        // TODO CB-15132: Eventually, try parsing the results of the CM decommission, and see if a partial decommission went through in the
        // timebound specified.
        Set<String> decommissionedHostNames = Collections.emptySet();
        if (hostsToRemove.size() > 0) {
            decommissionedHostNames = clusterDecomissionService.decommissionClusterNodesStopStart(hostsToRemove, POLL_FOR_10_MINUTES);
            updateInstanceStatuses(hostsToRemove, decommissionedHostNames, InstanceStatus.DECOMMISSIONED, "decommission requested for instances");
        }
        // This doesn't handle failures. It handles scenarios where CM list APIs don't have the necessary hosts available.
        List<String> allMissingHostnames = null;
        if (missingHostsInCm.size() > 0) {
            allMissingHostnames = new LinkedList<>(missingHostsInCm);
        }
        if (hostsToRemove.size() != decommissionedHostNames.size()) {
            Set<String> finalDecommissionedHostnames = decommissionedHostNames;
            List<String> additionalMissingDecommissionHostnames = hostsToRemove.keySet().stream().filter(h -> !finalDecommissionedHostnames.contains(h)).collect(Collectors.toList());
            LOGGER.info("Decommissioned fewer instances than requested. decommissionedCount={}, expectedCount={}, initialCount={}, notDecommissioned=[{}]", decommissionedHostNames.size(), hostsToRemove.size(), hostNames.size(), additionalMissingDecommissionHostnames);
            if (allMissingHostnames == null) {
                allMissingHostnames = new LinkedList<>();
            }
            allMissingHostnames.addAll(additionalMissingDecommissionHostnames);
        }
        LOGGER.info("hostsDecommissioned: count={}, hostNames={}", decommissionedHostNames.size(), decommissionedHostNames);
        if (decommissionedHostNames.size() > 0) {
            LOGGER.debug("Attempting to put decommissioned hosts into maintenance mode. count={}", decommissionedHostNames.size());
            flowMessageService.fireEventAndLog(stack.getId(), UPDATE_IN_PROGRESS.name(), CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTERINGCMMAINTMODE, String.valueOf(decommissionedHostNames.size()));
            clusterDecomissionService.enterMaintenanceMode(decommissionedHostNames);
            flowMessageService.fireEventAndLog(stack.getId(), UPDATE_IN_PROGRESS.name(), CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTEREDCMMAINTMODE, String.valueOf(decommissionedHostNames.size()));
            LOGGER.debug("Successfully put decommissioned hosts into maintenance mode. count={}", decommissionedHostNames.size());
        } else {
            LOGGER.debug("No nodes decommissioned, hence no nodes being put into maintenance mode");
        }
        return new StopStartDownscaleDecommissionViaCMResult(request, decommissionedHostNames, allMissingHostnames);
    } catch (Exception e) {
        // TODO CB-15132: This can be improved based on where and when the Exception occurred to potentially rollback certain aspects.
        // ClusterClientInitException is one which is explicitly thrown.
        String message = "Failed while attempting to decommission nodes via CM";
        LOGGER.error(message, e);
        return new StopStartDownscaleDecommissionViaCMResult(message, e, request);
    }
}
Also used : Stack(com.sequenceiq.cloudbreak.domain.stack.Stack) Cluster(com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster) ClusterDecomissionService(com.sequenceiq.cloudbreak.cluster.api.ClusterDecomissionService) StopStartDownscaleDecommissionViaCMResult(com.sequenceiq.cloudbreak.reactor.api.event.orchestration.StopStartDownscaleDecommissionViaCMResult) CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTEREDCMMAINTMODE(com.sequenceiq.cloudbreak.event.ResourceEvent.CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTEREDCMMAINTMODE) LoggerFactory(org.slf4j.LoggerFactory) EventSelectorUtil(com.sequenceiq.flow.event.EventSelectorUtil) Selectable(com.sequenceiq.cloudbreak.common.event.Selectable) HostGroupService(com.sequenceiq.cloudbreak.service.hostgroup.HostGroupService) StringUtils(org.apache.commons.lang3.StringUtils) Inject(javax.inject.Inject) ExceptionCatcherEventHandler(com.sequenceiq.flow.reactor.api.handler.ExceptionCatcherEventHandler) UPDATE_IN_PROGRESS(com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status.UPDATE_IN_PROGRESS) CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTERINGCMMAINTMODE(com.sequenceiq.cloudbreak.event.ResourceEvent.CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTERINGCMMAINTMODE) Event(reactor.bus.Event) CloudbreakFlowMessageService(com.sequenceiq.cloudbreak.core.flow2.stack.CloudbreakFlowMessageService) Map(java.util.Map) InstanceMetaDataService(com.sequenceiq.cloudbreak.service.stack.InstanceMetaDataService) LinkedList(java.util.LinkedList) NotFoundException(com.sequenceiq.cloudbreak.common.exception.NotFoundException) Logger(org.slf4j.Logger) InstanceStatus(com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus) Set(java.util.Set) HandlerEvent(com.sequenceiq.flow.reactor.api.handler.HandlerEvent) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) StopStartDownscaleDecommissionViaCMRequest(com.sequenceiq.cloudbreak.reactor.api.event.cluster.StopStartDownscaleDecommissionViaCMRequest) List(java.util.List) Component(org.springframework.stereotype.Component) HostGroup(com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) InstanceMetaData(com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData) ClusterApiConnectors(com.sequenceiq.cloudbreak.service.cluster.ClusterApiConnectors) Optional(java.util.Optional) Collections(java.util.Collections) StackService(com.sequenceiq.cloudbreak.service.stack.StackService)
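
getHostNamesForPrivateIds is referenced above but not included in the snippet. A minimal sketch of what such a helper could look like, assuming the request carries private instance IDs and that Stack exposes its instance metadata (getInstanceMetaDataAsList, getPrivateId and getDiscoveryFQDN are assumptions here, not taken from the example):

private Set<String> getHostNamesForPrivateIds(Set<Long> instanceIdsToDecommission, Stack stack) {
    // Illustrative sketch only: map each requested private ID to the discovery FQDN recorded in the instance metadata.
    return stack.getInstanceMetaDataAsList().stream()
            .filter(instanceMetaData -> instanceIdsToDecommission.contains(instanceMetaData.getPrivateId()))
            .map(InstanceMetaData::getDiscoveryFQDN)
            .filter(Objects::nonNull)
            .collect(Collectors.toSet());
}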

Example 12 with HostGroup

Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.

The class ClusterCommonService, method recreateCluster:

private FlowIdentifier recreateCluster(Stack stack, UpdateClusterV4Request updateCluster) throws TransactionExecutionException {
    Set<HostGroup> hostGroups = new HashSet<>();
    for (HostGroupV4Request json : updateCluster.getHostgroups()) {
        HostGroup hostGroup = hostGroupV4RequestToHostGroupConverter.convert(json);
        hostGroup = hostGroupDecorator.decorate(hostGroup, json, stack);
        hostGroups.add(hostGroup);
    }
    return clusterOperationService.recreate(stack, updateCluster.getBlueprintName(), hostGroups, updateCluster.getValidateBlueprint());
}
Also used : HostGroupV4Request(com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.request.HostGroupV4Request) HostGroup(com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) HashSet(java.util.HashSet)
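
For reference, the convert-and-decorate loop above could equivalently be written as a stream; this is only an illustrative rewrite of the snippet, not code from the project:

// Stream-based equivalent of the loop in recreateCluster (illustrative only).
Set<HostGroup> hostGroups = updateCluster.getHostgroups().stream()
        .map(json -> hostGroupDecorator.decorate(hostGroupV4RequestToHostGroupConverter.convert(json), json, stack))
        .collect(Collectors.toSet());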

Example 13 with HostGroup

Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.

The class UpdateHostsValidator, method validateRequest:

public boolean validateRequest(Stack stack, HostGroupAdjustmentV4Request hostGroupAdjustment) {
    HostGroup hostGroup = getHostGroup(stack, hostGroupAdjustment);
    int scalingAdjustment = hostGroupAdjustment.getScalingAdjustment();
    boolean downScale = scalingAdjustment < 0;
    if (scalingAdjustment == 0) {
        throw new BadRequestException("No scaling adjustments specified. Nothing to do.");
    }
    if (!downScale && hostGroup.getInstanceGroup() != null) {
        validateUnusedHosts(hostGroup.getInstanceGroup(), scalingAdjustment);
    } else {
        validateRegisteredHosts(stack, hostGroupAdjustment);
        if (hostGroupAdjustment.getWithStackUpdate() && hostGroupAdjustment.getScalingAdjustment() > 0) {
            throw new BadRequestException("ScalingAdjustment has to be decommission if you define withStackUpdate = 'true'.");
        }
    }
    return downScale;
}
Also used : HostGroup(com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) BadRequestException(com.sequenceiq.cloudbreak.common.exception.BadRequestException) Blueprint(com.sequenceiq.cloudbreak.domain.Blueprint)
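
getHostGroup is referenced above but not shown. A hypothetical sketch, assuming the validator has a HostGroupService like the one used in Example 11 and that HostGroupAdjustmentV4Request exposes the group name via getHostGroup():

private HostGroup getHostGroup(Stack stack, HostGroupAdjustmentV4Request hostGroupAdjustment) {
    // Assumed lookup: resolve the host group on the stack's cluster or fail the request early.
    return hostGroupService.getByClusterIdAndName(stack.getCluster().getId(), hostGroupAdjustment.getHostGroup())
            .orElseThrow(() -> new BadRequestException(
                    String.format("Host group '%s' not found on cluster.", hostGroupAdjustment.getHostGroup())));
}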

Example 14 with HostGroup

Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.

The class RecipeEngine, method uploadRecipes:

public void uploadRecipes(Long stackId) throws CloudbreakException {
    Stack stack = measure(() -> stackService.getByIdWithListsInTransaction(stackId), LOGGER, "stackService.getByIdWithListsInTransaction() took {} ms");
    MDCBuilder.buildMdcContext(stack);
    LOGGER.info("Upload recipes started for stack with name {}", stack.getName());
    stack.setResources(measure(() -> resourceService.getNotInstanceRelatedByStackId(stackId), LOGGER, "resourceService.getNotInstanceRelatedByStackId() took {} ms"));
    Set<HostGroup> hostGroups = measure(() -> hostGroupService.getByClusterWithRecipes(stack.getCluster().getId()), LOGGER, "hostGroupService.getByClusterWithRecipes() took {} ms");
    Map<HostGroup, List<RecipeModel>> recipeModels = recipeTemplateService.createRecipeModels(stack, hostGroups);
    uploadRecipesOnHostGroups(stack, hostGroups, recipeModels);
    LOGGER.info("Upload recipes finished successfully for stack with name {}", stack.getName());
}
Also used : HostGroup(com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) List(java.util.List) Stack(com.sequenceiq.cloudbreak.domain.stack.Stack)
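
measure(...) wraps each service call with timing; its implementation is not part of this example. A rough sketch of a helper with that shape (the signature, the Supplier parameter and the debug log level are assumptions; the project's actual utility may differ):

private static <T> T measure(Supplier<T> callable, Logger logger, String message) {
    // Assumed shape: time the call and log the elapsed milliseconds via the "... took {} ms" template.
    long start = System.currentTimeMillis();
    try {
        return callable.get();
    } finally {
        logger.debug(message, System.currentTimeMillis() - start);
    }
}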

Example 15 with HostGroup

Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.

The class ComponentLocatorService, method getFqdnsByComponents:

private Map<String, List<String>> getFqdnsByComponents(Cluster cluster, Collection<String> componentNames) {
    Map<String, List<String>> fqdnsByService = new HashMap<>();
    String blueprintText = cluster.getBlueprint().getBlueprintText();
    BlueprintTextProcessor processor = cmTemplateProcessorFactory.get(blueprintText);
    for (HostGroup hg : hostGroupService.getByCluster(cluster.getId())) {
        Set<String> hgComponents = new HashSet<>(processor.getComponentsInHostGroup(hg.getName()));
        hgComponents.retainAll(componentNames);
        fillList(fqdnsByService, hg, hgComponents);
    }
    return fqdnsByService;
}
Also used : HashMap(java.util.HashMap) BlueprintTextProcessor(com.sequenceiq.cloudbreak.template.processor.BlueprintTextProcessor) HostGroup(com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) ArrayList(java.util.ArrayList) Collectors.toList(java.util.stream.Collectors.toList) List(java.util.List) HashSet(java.util.HashSet)
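
fillList is referenced above but not shown. A hypothetical sketch of what it could do, assuming the FQDNs come from the host group's instance group metadata (getInstanceGroup, getNotDeletedInstanceMetaDataSet and getDiscoveryFQDN are assumptions, not taken from the snippet):

private void fillList(Map<String, List<String>> fqdnsByService, HostGroup hostGroup, Set<String> components) {
    // Illustrative only: collect the FQDNs of the host group's live instances and register them for every matched component.
    List<String> fqdns = hostGroup.getInstanceGroup().getNotDeletedInstanceMetaDataSet().stream()
            .map(InstanceMetaData::getDiscoveryFQDN)
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    for (String component : components) {
        fqdnsByService.computeIfAbsent(component, key -> new ArrayList<>()).addAll(fqdns);
    }
}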

Aggregations

HostGroup (com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) 132
InstanceMetaData (com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData) 66
Stack (com.sequenceiq.cloudbreak.domain.stack.Stack) 60
HashSet (java.util.HashSet) 55
Test (org.junit.jupiter.api.Test) 52
List (java.util.List) 50
InstanceGroup (com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup) 43
Cluster (com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster) 37
Map (java.util.Map) 37
Set (java.util.Set) 37
Collectors (java.util.stream.Collectors) 35
HostGroupService (com.sequenceiq.cloudbreak.service.hostgroup.HostGroupService) 25
Optional (java.util.Optional) 25
ArrayList (java.util.ArrayList) 24
StackService (com.sequenceiq.cloudbreak.service.stack.StackService) 22
InstanceStatus (com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus) 21
ClusterApiConnectors (com.sequenceiq.cloudbreak.service.cluster.ClusterApiConnectors) 21
HashMap (java.util.HashMap) 19
Logger (org.slf4j.Logger) 19
LoggerFactory (org.slf4j.LoggerFactory) 19