Example 66 with Stack

use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.

In the class ClusterSyncHandler, the accept method:

@Override
public void accept(Event<ClusterSyncRequest> event) {
    ClusterSyncRequest request = event.getData();
    ClusterSyncResult result;
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(request.getResourceId());
        Cluster cluster = clusterService.retrieveClusterByStackIdWithoutAuth(request.getResourceId()).orElse(null);
        clusterStatusUpdater.updateClusterStatus(stack, cluster);
        if (cluster != null && (stack.isAvailable() || stack.isMaintenanceModeEnabled())) {
            instanceMetadataUpdater.updatePackageVersionsOnAllInstances(stack.getId());
        }
        result = new ClusterSyncResult(request);
    } catch (Exception e) {
        result = new ClusterSyncResult(e.getMessage(), e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
Also used : ClusterSyncRequest(com.sequenceiq.cloudbreak.reactor.api.event.resource.ClusterSyncRequest) Cluster(com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster) ClusterSyncResult(com.sequenceiq.cloudbreak.reactor.api.event.resource.ClusterSyncResult) Stack(com.sequenceiq.cloudbreak.domain.stack.Stack)
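
The handler above always answers the incoming event with exactly one result event: a success result when the sync completes, or a failure result built from the caught exception, sent back through eventBus.notify with the original headers. Below is a minimal, self-contained sketch of that guard pattern; the Outcome record and runGuarded helper are hypothetical stand-ins, not cloudbreak types.

import java.util.function.Supplier;

public class SyncOutcomeSketch {

    // Hypothetical stand-in for ClusterSyncResult: a success flag plus an optional error message.
    record Outcome(boolean success, String errorMessage) { }

    // Run the sync work; any exception becomes a failure outcome instead of escaping,
    // mirroring the try/catch around the handler body above.
    static Outcome runGuarded(Supplier<Outcome> work) {
        try {
            return work.get();
        } catch (Exception e) {
            return new Outcome(false, e.getMessage());
        }
    }

    public static void main(String[] args) {
        Outcome ok = runGuarded(() -> new Outcome(true, null));
        Outcome failed = runGuarded(() -> { throw new IllegalStateException("sync failed"); });
        System.out.println(ok.success() + " / " + failed.errorMessage()); // true / sync failed
    }
}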

Example 67 with Stack

use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.

In the class CollectDownscaleCandidatesHandler, the collectCandidates method:

private Set<Long> collectCandidates(CollectDownscaleCandidatesRequest request, Stack stack) throws CloudbreakException {
    Set<Long> privateIds = new HashSet<>();
    LOGGER.debug("Collecting candidates for downscale based on [{}] and stack CRN [{}].", request, stack.getResourceCrn());
    for (Map.Entry<String, Integer> entry : request.getHostGroupWithAdjustment().entrySet()) {
        String hostGroupName = entry.getKey();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stack.getCluster().getId(), hostGroupName).orElseThrow(NotFoundException.notFound("hostgroup", hostGroupName));
        LOGGER.debug("Host group has been found for cluster! It's name: {}", hostGroup.getName());
        List<InstanceMetaData> metaDataForInstanceGroup = instanceMetaDataService.findAliveInstancesInInstanceGroup(hostGroup.getInstanceGroup().getId());
        Set<InstanceMetaData> collectedCandidates = clusterApiConnectors.getConnector(stack).clusterDecomissionService().collectDownscaleCandidates(hostGroup, entry.getValue(), new HashSet<>(metaDataForInstanceGroup));
        String collectedHostsAsString = collectedCandidates.stream().map(instanceMetaData -> instanceMetaData.getDiscoveryFQDN() != null ? "FQDN: " + instanceMetaData.getDiscoveryFQDN() : "Private id: " + instanceMetaData.getPrivateId()).collect(Collectors.joining(", "));
        LOGGER.debug("The following hosts has been collected as candidates for downscale: [{}]", collectedHostsAsString);
        flowMessageService.fireEventAndLog(stack.getId(), AVAILABLE.name(), STACK_SELECT_FOR_DOWNSCALE, collectedHostsAsString);
        privateIds.addAll(collectedCandidates.stream().map(InstanceMetaData::getPrivateId).collect(Collectors.toSet()));
    }
    return privateIds;
}
Also used : Stack(com.sequenceiq.cloudbreak.domain.stack.Stack) CloudbreakException(com.sequenceiq.cloudbreak.service.CloudbreakException) STACK_SELECT_FOR_DOWNSCALE(com.sequenceiq.cloudbreak.event.ResourceEvent.STACK_SELECT_FOR_DOWNSCALE) Resource(com.sequenceiq.cloudbreak.domain.Resource) AVAILABLE(com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status.AVAILABLE) LoggerFactory(org.slf4j.LoggerFactory) EventSelectorUtil(com.sequenceiq.flow.event.EventSelectorUtil) HostGroupService(com.sequenceiq.cloudbreak.service.hostgroup.HostGroupService) CollectionUtils.isEmpty(org.apache.commons.collections4.CollectionUtils.isEmpty) CollectionUtils(org.apache.commons.collections4.CollectionUtils) HashSet(java.util.HashSet) Inject(javax.inject.Inject) ResourceService(com.sequenceiq.cloudbreak.service.resource.ResourceService) Event(reactor.bus.Event) CloudbreakFlowMessageService(com.sequenceiq.cloudbreak.core.flow2.stack.CloudbreakFlowMessageService) Map(java.util.Map) InstanceMetaDataService(com.sequenceiq.cloudbreak.service.stack.InstanceMetaDataService) EventHandler(com.sequenceiq.flow.reactor.api.handler.EventHandler) NotFoundException(com.sequenceiq.cloudbreak.common.exception.NotFoundException) Logger(org.slf4j.Logger) Collection(java.util.Collection) Set(java.util.Set) EventBus(reactor.bus.EventBus) Collectors(java.util.stream.Collectors) List(java.util.List) Component(org.springframework.stereotype.Component) HostGroup(com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup) InstanceMetaData(com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData) CollectDownscaleCandidatesRequest(com.sequenceiq.cloudbreak.reactor.api.event.resource.CollectDownscaleCandidatesRequest) ClusterApiConnectors(com.sequenceiq.cloudbreak.service.cluster.ClusterApiConnectors) CollectDownscaleCandidatesResult(com.sequenceiq.cloudbreak.reactor.api.event.resource.CollectDownscaleCandidatesResult) StackService(com.sequenceiq.cloudbreak.service.stack.StackService)
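
One small but reusable piece of the method above is how it describes the collected candidates for logging and the flow event: it prefers the discovery FQDN and falls back to the private id. The sketch below isolates that formatting step with a hypothetical Candidate record instead of cloudbreak's InstanceMetaData.

import java.util.List;
import java.util.stream.Collectors;

public class DownscaleCandidateFormatSketch {

    // Hypothetical stand-in for InstanceMetaData: only the two fields the formatting needs.
    record Candidate(Long privateId, String discoveryFqdn) { }

    // Prefer the discovery FQDN, fall back to the private id, and join for logging.
    static String describe(List<Candidate> candidates) {
        return candidates.stream()
                .map(c -> c.discoveryFqdn() != null ? "FQDN: " + c.discoveryFqdn() : "Private id: " + c.privateId())
                .collect(Collectors.joining(", "));
    }

    public static void main(String[] args) {
        List<Candidate> candidates = List.of(
                new Candidate(1L, "worker1.example.com"),
                new Candidate(2L, null));
        System.out.println(describe(candidates)); // FQDN: worker1.example.com, Private id: 2
    }
}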

Example 68 with Stack

use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.

In the class DecommissionHandler, the accept method:

@Override
public void accept(Event<DecommissionRequest> event) {
    DecommissionRequest request = event.getData();
    DecommissionResult result;
    Set<String> hostNames = Collections.emptySet();
    boolean forced = request.getDetails() != null && request.getDetails().isForced();
    try {
        Stack stack = stackService.getByIdWithListsInTransaction(request.getResourceId());
        hostNames = getHostNamesForPrivateIds(request, stack);
        ClusterDecomissionService clusterDecomissionService = getClusterDecomissionService(stack);
        Map<String, InstanceMetaData> hostsToRemove = new HashMap<>();
        Set<String> hostGroupNames = request.getHostGroupNames();
        for (String hostGroup : hostGroupNames) {
            hostsToRemove.putAll(getRemovableHosts(clusterDecomissionService, stack, hostGroup, hostNames));
        }
        updateInstancesToDeleteRequested(hostsToRemove.values());
        if (!hostsToRemove.isEmpty()) {
            executePreTerminationRecipes(stack, hostsToRemove.keySet());
        }
        Optional<String> runtimeVersion = runtimeVersionService.getRuntimeVersion(stack.getCluster().getId());
        if (entitlementService.bulkHostsRemovalFromCMSupported(Crn.fromString(stack.getResourceCrn()).getAccountId()) && CMRepositoryVersionUtil.isCmBulkHostsRemovalAllowed(runtimeVersion)) {
            result = bulkHostsRemoval(request, hostNames, forced, stack, clusterDecomissionService, hostsToRemove);
        } else {
            result = singleHostsRemoval(request, hostNames, forced, stack, clusterDecomissionService, hostsToRemove);
        }
    } catch (Exception e) {
        LOGGER.info("Exception occurred during decommission.", e);
        if (isTolerableError(e) && forced && !request.getDetails().isRepair()) {
            eventService.fireCloudbreakEvent(request.getResourceId(), UPDATE_IN_PROGRESS.name(), CLUSTER_DECOMMISSION_FAILED_FORCE_DELETE_CONTINUE, Collections.singletonList(e.getMessage()));
            result = new DecommissionResult(request, hostNames);
        } else {
            result = new DecommissionResult(e.getMessage(), e, request, hostNames, UNKNOWN_ERROR_PHASE);
        }
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
Also used : HashMap(java.util.HashMap) CloudbreakException(com.sequenceiq.cloudbreak.service.CloudbreakException) ClusterClientInitException(com.sequenceiq.cloudbreak.cluster.service.ClusterClientInitException) NotFoundException(com.sequenceiq.cloudbreak.common.exception.NotFoundException) FreeIpaOperationFailedException(com.sequenceiq.cloudbreak.service.freeipa.FreeIpaOperationFailedException) CloudbreakOrchestratorFailedException(com.sequenceiq.cloudbreak.orchestrator.exception.CloudbreakOrchestratorFailedException) CloudbreakServiceException(com.sequenceiq.cloudbreak.common.exception.CloudbreakServiceException) Stack(com.sequenceiq.cloudbreak.domain.stack.Stack) InstanceMetaData(com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData) ClusterDecomissionService(com.sequenceiq.cloudbreak.cluster.api.ClusterDecomissionService) DecommissionRequest(com.sequenceiq.cloudbreak.reactor.api.event.resource.DecommissionRequest) DecommissionResult(com.sequenceiq.cloudbreak.reactor.api.event.resource.DecommissionResult)
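
The interesting branch in the handler above is the error path: a forced, non-repair decommission tolerates certain failures and still reports success for the host names collected so far, while anything else becomes a failure result. Below is a minimal sketch of that decision, with hypothetical types in place of DecommissionResult.

import java.util.Set;

public class DecommissionErrorPolicySketch {

    // Hypothetical stand-in for choosing between the two DecommissionResult constructors.
    record Decision(boolean continueAsSuccess, String detail) { }

    static Decision onError(Exception e, boolean tolerableError, boolean forced, boolean repair, Set<String> hostNames) {
        if (tolerableError && forced && !repair) {
            // Forced, non-repair downscale: log-and-continue with the hosts collected so far.
            return new Decision(true, "force delete continues for " + hostNames);
        }
        // Anything else is reported as a failed decommission.
        return new Decision(false, e.getMessage());
    }

    public static void main(String[] args) {
        Exception boom = new IllegalStateException("Cloudera Manager unreachable");
        System.out.println(onError(boom, true, true, false, Set.of("worker1")));  // continues as success
        System.out.println(onError(boom, true, false, false, Set.of("worker1"))); // propagated as failure
    }
}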

Example 69 with Stack

use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.

In the class UnhealthyInstancesDetectionHandler, the accept method:

@Override
public void accept(Event<UnhealthyInstancesDetectionRequest> event) {
    UnhealthyInstancesDetectionRequest request = event.getData();
    UnhealthyInstancesDetectionResult result;
    Long stackId = request.getResourceId();
    Stack stack = stackService.getByIdWithTransaction(stackId);
    try {
        Set<InstanceMetaData> candidateUnhealthyInstances = unhealthyInstanceSelector.selectCandidateUnhealthyInstances(stack.getId());
        if (candidateUnhealthyInstances.isEmpty()) {
            result = new UnhealthyInstancesDetectionResult(request, Collections.emptySet());
        } else {
            Set<String> unhealthyInstances = unhealthyInstancesFinalizer.finalizeUnhealthyInstances(stack, candidateUnhealthyInstances);
            result = new UnhealthyInstancesDetectionResult(request, unhealthyInstances);
        }
    } catch (RuntimeException e) {
        String msg = String.format("Could not get statuses for unhealty instances: %s", e.getMessage());
        LOG.info(msg, e);
        result = new UnhealthyInstancesDetectionResult(msg, e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
Also used : UnhealthyInstancesDetectionResult(com.sequenceiq.cloudbreak.reactor.api.event.resource.UnhealthyInstancesDetectionResult) InstanceMetaData(com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData) UnhealthyInstancesDetectionRequest(com.sequenceiq.cloudbreak.reactor.api.event.resource.UnhealthyInstancesDetectionRequest) Stack(com.sequenceiq.cloudbreak.domain.stack.Stack)
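
The control flow above is a two-step filter: select candidate unhealthy instances, and only if there are any, run the comparatively expensive finalizer that confirms which of them are really unhealthy. Here is a small sketch of that shape, using plain strings and a Function as hypothetical stand-ins for the selector and finalizer services.

import java.util.Collections;
import java.util.Set;
import java.util.function.Function;

public class UnhealthyDetectionSketch {

    // candidates: instance ids flagged as possibly unhealthy; finalizer: the step that
    // confirms which of them are really unhealthy (a hypothetical stand-in for
    // unhealthyInstancesFinalizer.finalizeUnhealthyInstances).
    static Set<String> detect(Set<String> candidates, Function<Set<String>, Set<String>> finalizer) {
        if (candidates.isEmpty()) {
            return Collections.emptySet(); // nothing to check, skip the finalization entirely
        }
        return finalizer.apply(candidates);
    }

    public static void main(String[] args) {
        // Pretend the finalizer confirms only "i-2" as unhealthy.
        System.out.println(detect(Set.of("i-1", "i-2"), candidates -> Set.of("i-2"))); // [i-2]
        System.out.println(detect(Set.of(), candidates -> candidates));                // []
    }
}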

Example 70 with Stack

use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.

In the class StackStatusCheckerJob, the executeTracedJob method:

@Override
protected void executeTracedJob(JobExecutionContext context) throws JobExecutionException {
    if (flowLogService.isOtherFlowRunning(getStackId())) {
        LOGGER.debug("StackStatusCheckerJob cannot run, because flow is running for stack: {}", getStackId());
        return;
    }
    try {
        measure(() -> {
            Stack stack = stackService.get(getStackId());
            Status stackStatus = stack.getStatus();
            if (Status.getUnschedulableStatuses().contains(stackStatus)) {
                LOGGER.debug("Stack sync will be unscheduled, stack state is {}", stackStatus);
                jobService.unschedule(getLocalId());
            } else if (shouldSwitchToLongSyncJob(stackStatus, context)) {
                LOGGER.debug("Stack sync will be scheduled to long polling, stack state is {}", stackStatus);
                jobService.unschedule(getLocalId());
                jobService.scheduleLongIntervalCheck(getStackId(), StackJobAdapter.class);
            } else if (null == stackStatus || ignoredStates().contains(stackStatus)) {
                LOGGER.debug("Stack sync is skipped, stack state is {}", stackStatus);
            } else if (syncableStates().contains(stackStatus)) {
                RegionAwareInternalCrnGenerator dataHub = regionAwareInternalCrnGeneratorFactory.datahub();
                ThreadBasedUserCrnProvider.doAs(dataHub.getInternalCrnForServiceAsString(), () -> doSync(stack));
                switchToShortSyncIfNecessary(context);
            } else {
                LOGGER.warn("Unhandled stack status, {}", stackStatus);
            }
        }, LOGGER, "Check status took {} ms for stack {}.", getStackId());
    } catch (Exception e) {
        LOGGER.info("Exception during cluster state check.", e);
    }
}
Also used : DetailedStackStatus(com.sequenceiq.cloudbreak.api.endpoint.v4.common.DetailedStackStatus) CloudVmInstanceStatus(com.sequenceiq.cloudbreak.cloud.model.CloudVmInstanceStatus) InstanceStatus(com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus) Status(com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status) RegionAwareInternalCrnGenerator(com.sequenceiq.cloudbreak.auth.crn.RegionAwareInternalCrnGenerator) JobExecutionException(org.quartz.JobExecutionException) Stack(com.sequenceiq.cloudbreak.domain.stack.Stack)
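
The job above is essentially a dispatch on the current stack status: unschedulable statuses cancel the job, some statuses switch it to long-interval polling, ignored statuses skip the run, and syncable statuses trigger the actual sync. The sketch below models that dispatch with a hypothetical, heavily reduced Status enum rather than cloudbreak's Status class; the grouping of statuses is illustrative only.

import java.util.Set;

public class StatusDispatchSketch {

    // Hypothetical status values; the real job uses com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status.
    enum Status { DELETE_COMPLETED, STOPPED, AVAILABLE, UPDATE_IN_PROGRESS }

    enum Action { UNSCHEDULE, LONG_SYNC, SKIP, SYNC }

    static Action dispatch(Status status, boolean longSyncPreferred) {
        if (status == null) {
            return Action.SKIP; // treat a missing status like an ignored state
        }
        Set<Status> unschedulable = Set.of(Status.DELETE_COMPLETED);
        Set<Status> ignored = Set.of(Status.UPDATE_IN_PROGRESS);
        Set<Status> syncable = Set.of(Status.AVAILABLE, Status.STOPPED);
        if (unschedulable.contains(status)) {
            return Action.UNSCHEDULE;   // corresponds to jobService.unschedule(...)
        } else if (longSyncPreferred) {
            return Action.LONG_SYNC;    // reschedule with a long polling interval
        } else if (ignored.contains(status)) {
            return Action.SKIP;
        } else if (syncable.contains(status)) {
            return Action.SYNC;         // corresponds to doSync(stack) under the internal CRN
        }
        return Action.SKIP;             // an unhandled status is only logged as a warning
    }

    public static void main(String[] args) {
        System.out.println(dispatch(Status.AVAILABLE, false));        // SYNC
        System.out.println(dispatch(Status.DELETE_COMPLETED, false)); // UNSCHEDULE
    }
}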

Aggregations

Stack (com.sequenceiq.cloudbreak.domain.stack.Stack): 1041 usages
Test (org.junit.jupiter.api.Test): 326
Cluster (com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster): 255
Test (org.junit.Test): 208
InstanceMetaData (com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData): 158
Map (java.util.Map): 114
DetailedEnvironmentResponse (com.sequenceiq.environment.api.v1.environment.model.response.DetailedEnvironmentResponse): 113
InstanceGroup (com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup): 112
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 111
List (java.util.List): 101
Set (java.util.Set): 101
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 100
Collectors (java.util.stream.Collectors): 84
Optional (java.util.Optional): 83
HashSet (java.util.HashSet): 82
Inject (javax.inject.Inject): 80
Logger (org.slf4j.Logger): 78
LoggerFactory (org.slf4j.LoggerFactory): 78
DetailedStackStatus (com.sequenceiq.cloudbreak.api.endpoint.v4.common.DetailedStackStatus): 69
StackStatus (com.sequenceiq.cloudbreak.domain.stack.StackStatus): 67