
Example 16 with Stack

Use of com.sequenceiq.cloudbreak.domain.Stack in project cloudbreak by hortonworks.

In the class AmbariClusterService, the method updateUserNamePassword:

@Override
public void updateUserNamePassword(Long stackId, UserNamePasswordJson userNamePasswordJson) {
    Stack stack = stackService.get(stackId);
    Cluster cluster = stack.getCluster();
    String oldUserName = cluster.getUserName();
    String oldPassword = cluster.getPassword();
    String newUserName = userNamePasswordJson.getUserName();
    String newPassword = userNamePasswordJson.getPassword();
    if (!newUserName.equals(oldUserName)) {
        flowManager.triggerClusterCredentialReplace(stack.getId(), userNamePasswordJson.getUserName(), userNamePasswordJson.getPassword());
    } else if (!newPassword.equals(oldPassword)) {
        flowManager.triggerClusterCredentialUpdate(stack.getId(), userNamePasswordJson.getPassword());
    } else {
        throw new BadRequestException("The request may not change credential");
    }
}
Also used: Cluster (com.sequenceiq.cloudbreak.domain.Cluster), BadRequestException (com.sequenceiq.cloudbreak.controller.BadRequestException), Stack (com.sequenceiq.cloudbreak.domain.Stack)
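
A minimal Mockito sketch (not part of the project) of how the credential-replace branch above could be exercised in isolation. The test class name, the ReactorFlowManager type, and the field-injection wiring that @InjectMocks relies on are assumptions; every stubbed call (stackService.get, getCluster, getUserName, getPassword, triggerClusterCredentialReplace) appears in the snippet above.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import org.junit.Before;
import org.junit.Test;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import com.sequenceiq.cloudbreak.api.model.UserNamePasswordJson;
import com.sequenceiq.cloudbreak.domain.Cluster;
import com.sequenceiq.cloudbreak.domain.Stack;

// Imports for AmbariClusterService, StackService and ReactorFlowManager are omitted
// because their packages are not shown in the snippet above.
public class AmbariClusterServiceCredentialTest {

    @Mock
    private StackService stackService;

    // The snippet only shows a field named flowManager; the type name is assumed.
    @Mock
    private ReactorFlowManager flowManager;

    @InjectMocks
    private AmbariClusterService underTest;

    @Before
    public void setUp() {
        MockitoAnnotations.initMocks(this);
    }

    @Test
    public void changedUserNameTriggersCredentialReplace() {
        Stack stack = mock(Stack.class);
        Cluster cluster = mock(Cluster.class);
        UserNamePasswordJson request = mock(UserNamePasswordJson.class);
        when(stackService.get(1L)).thenReturn(stack);
        when(stack.getId()).thenReturn(1L);
        when(stack.getCluster()).thenReturn(cluster);
        when(cluster.getUserName()).thenReturn("admin");
        when(cluster.getPassword()).thenReturn("secret");
        // Same password, different user name: the replace flow should be triggered.
        when(request.getUserName()).thenReturn("operator");
        when(request.getPassword()).thenReturn("secret");

        underTest.updateUserNamePassword(1L, request);

        verify(flowManager).triggerClusterCredentialReplace(1L, "operator", "secret");
    }
}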

Example 17 with Stack

Use of com.sequenceiq.cloudbreak.domain.Stack in project cloudbreak by hortonworks.

In the class AmbariClusterService, the method failureReport:

@Override
public void failureReport(Long stackId, List<String> failedNodes) {
    Stack stack = stackService.get(stackId);
    Cluster cluster = stack.getCluster();
    Map<String, List<String>> autoRecoveryNodesMap = new HashMap<>();
    Map<String, HostMetadata> autoRecoveryHostMetadata = new HashMap<>();
    Map<String, HostMetadata> failedHostMetadata = new HashMap<>();
    for (String failedNode : failedNodes) {
        HostMetadata hostMetadata = hostMetadataRepository.findHostInClusterByName(cluster.getId(), failedNode);
        if (hostMetadata == null) {
            throw new BadRequestException("No metadata information for the node: " + failedNode);
        }
        HostGroup hostGroup = hostMetadata.getHostGroup();
        if (hostGroup.getRecoveryMode() == RecoveryMode.AUTO) {
            validateRepair(stack, hostMetadata);
        }
        String hostGroupName = hostGroup.getName();
        if (hostGroup.getRecoveryMode() == RecoveryMode.AUTO) {
            List<String> nodeList = autoRecoveryNodesMap.get(hostGroupName);
            if (nodeList == null) {
                validateComponentsCategory(stack, hostGroupName);
                nodeList = new ArrayList<>();
                autoRecoveryNodesMap.put(hostGroupName, nodeList);
            }
            nodeList.add(failedNode);
            autoRecoveryHostMetadata.put(failedNode, hostMetadata);
        } else if (hostGroup.getRecoveryMode() == RecoveryMode.MANUAL) {
            failedHostMetadata.put(failedNode, hostMetadata);
        }
    }
    if (!autoRecoveryNodesMap.isEmpty()) {
        flowManager.triggerClusterRepairFlow(stackId, autoRecoveryNodesMap, false);
        String recoveryMessage = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_AUTORECOVERY_REQUESTED.code(), Collections.singletonList(autoRecoveryNodesMap));
        updateChangedHosts(cluster, autoRecoveryHostMetadata, HostMetadataState.HEALTHY, HostMetadataState.WAITING_FOR_REPAIR, recoveryMessage);
    }
    if (!failedHostMetadata.isEmpty()) {
        String recoveryMessage = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_FAILED_NODES_REPORTED.code(), Collections.singletonList(failedHostMetadata.keySet()));
        updateChangedHosts(cluster, failedHostMetadata, HostMetadataState.HEALTHY, HostMetadataState.UNHEALTHY, recoveryMessage);
    }
}
Also used: HashMap (java.util.HashMap), Cluster (com.sequenceiq.cloudbreak.domain.Cluster), BadRequestException (com.sequenceiq.cloudbreak.controller.BadRequestException), HostGroup (com.sequenceiq.cloudbreak.domain.HostGroup), ArrayList (java.util.ArrayList), List (java.util.List), Stack (com.sequenceiq.cloudbreak.domain.Stack), HostMetadata (com.sequenceiq.cloudbreak.domain.HostMetadata)
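
The nodeList bookkeeping above (get, null check, put) is a classic group-by-key pattern. A standalone sketch of the same idea using Map.computeIfAbsent, with plain strings standing in for the Cloudbreak domain types (the FailedNodeGrouping class and the hostGroupOf lookup map are illustrative inventions, not project code):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class FailedNodeGrouping {

    // Groups failed node names by the host group they belong to, mirroring the
    // autoRecoveryNodesMap bookkeeping in failureReport. hostGroupOf stands in for the
    // hostMetadataRepository/HostGroup lookup that the real method performs.
    public static Map<String, List<String>> groupByHostGroup(List<String> failedNodes, Map<String, String> hostGroupOf) {
        Map<String, List<String>> nodesByHostGroup = new HashMap<>();
        for (String failedNode : failedNodes) {
            String hostGroupName = hostGroupOf.get(failedNode);
            if (hostGroupName == null) {
                throw new IllegalArgumentException("No metadata information for the node: " + failedNode);
            }
            nodesByHostGroup.computeIfAbsent(hostGroupName, name -> new ArrayList<>()).add(failedNode);
        }
        return nodesByHostGroup;
    }
}

Note that the real method additionally runs validateRepair and validateComponentsCategory before admitting a host group to the map, and keeps MANUAL-recovery nodes in a separate failedHostMetadata map instead.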

Example 18 with Stack

Use of com.sequenceiq.cloudbreak.domain.Stack in project cloudbreak by hortonworks.

In the class AmbariClusterService, the method updateClusterMetadata:

@Override
@Transactional(TxType.NEVER)
public Cluster updateClusterMetadata(Long stackId) {
    Stack stack = stackService.getById(stackId);
    AmbariClient ambariClient = getAmbariClient(stack);
    Map<String, Integer> hostGroupCounter = new HashMap<>();
    Set<HostMetadata> hosts = hostMetadataRepository.findHostsInCluster(stack.getCluster().getId());
    Map<String, String> hostStatuses = ambariClient.getHostStatuses();
    for (HostMetadata host : hosts) {
        if (hostStatuses.containsKey(host.getHostName())) {
            String hgName = host.getHostGroup().getName();
            Integer hgCounter = hostGroupCounter.getOrDefault(hgName, 0) + 1;
            hostGroupCounter.put(hgName, hgCounter);
            HostMetadataState newState = HostMetadataState.HEALTHY.name().equals(hostStatuses.get(host.getHostName())) ? HostMetadataState.HEALTHY : HostMetadataState.UNHEALTHY;
            boolean stateChanged = updateHostMetadataByHostState(stack, host.getHostName(), newState);
            if (stateChanged && HostMetadataState.HEALTHY == newState) {
                updateInstanceMetadataStateToRegistered(stackId, host);
            }
        }
    }
    hostGroupCounter(stack.getCluster().getId(), hostGroupCounter);
    return stack.getCluster();
}
Also used: HashMap (java.util.HashMap), HostMetadataState (com.sequenceiq.cloudbreak.common.type.HostMetadataState), Stack (com.sequenceiq.cloudbreak.domain.Stack), AmbariClient (com.sequenceiq.ambari.client.AmbariClient), HostMetadata (com.sequenceiq.cloudbreak.domain.HostMetadata), Transactional (javax.transaction.Transactional)
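
The hostGroupCounter update above (getOrDefault plus put) can also be written with Map.merge. A small standalone sketch of just that counting step, with a plain list of host group names standing in for the HostMetadata set (the HostGroupCounting class is an illustrative invention, not project code):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HostGroupCounting {

    // Counts how many Ambari-known hosts belong to each host group, mirroring the
    // hostGroupCounter map that updateClusterMetadata builds before handing it to
    // the hostGroupCounter(clusterId, counter) call.
    public static Map<String, Integer> countByHostGroup(List<String> hostGroupNames) {
        Map<String, Integer> hostGroupCounter = new HashMap<>();
        for (String hostGroupName : hostGroupNames) {
            hostGroupCounter.merge(hostGroupName, 1, Integer::sum);
        }
        return hostGroupCounter;
    }
}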

Example 19 with Stack

Use of com.sequenceiq.cloudbreak.domain.Stack in project cloudbreak by hortonworks.

In the class AmbariClusterService, the method repairCluster:

@Override
public void repairCluster(Long stackId, List<String> repairedHostGroups, boolean removeOnly) {
    Stack stack = stackService.get(stackId);
    Cluster cluster = stack.getCluster();
    Set<HostGroup> hostGroups = hostGroupService.getByCluster(cluster.getId());
    Map<String, List<String>> failedNodeMap = new HashMap<>();
    for (HostGroup hg : hostGroups) {
        List<String> failedNodes = new ArrayList<>();
        if (repairedHostGroups.contains(hg.getName()) && hg.getRecoveryMode() == RecoveryMode.MANUAL) {
            for (HostMetadata hmd : hg.getHostMetadata()) {
                if (hmd.getHostMetadataState() == HostMetadataState.UNHEALTHY) {
                    validateRepair(stack, hmd);
                    if (!failedNodeMap.containsKey(hg.getName())) {
                        failedNodeMap.put(hg.getName(), failedNodes);
                    }
                    failedNodes.add(hmd.getHostName());
                }
            }
        }
    }
    if (!failedNodeMap.isEmpty()) {
        flowManager.triggerClusterRepairFlow(stackId, failedNodeMap, removeOnly);
        String recoveryMessage = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_MANUALRECOVERY_REQUESTED.code(), Collections.singletonList(repairedHostGroups));
        LOGGER.info(recoveryMessage);
        eventService.fireCloudbreakEvent(stack.getId(), "RECOVERY", recoveryMessage);
    }
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), Cluster (com.sequenceiq.cloudbreak.domain.Cluster), HostGroup (com.sequenceiq.cloudbreak.domain.HostGroup), List (java.util.List), Stack (com.sequenceiq.cloudbreak.domain.Stack), HostMetadata (com.sequenceiq.cloudbreak.domain.HostMetadata)
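
A hedged caller sketch showing how manual repair might be requested for one host group. The ClusterRepairExample class, the injected field, the stack id and the 'worker' host group name are illustrative assumptions; only the repairCluster(Long, List<String>, boolean) signature comes from the snippet above, and reading removeOnly = false as "repair rather than merely remove" is inferred from the parameter name.

import java.util.Collections;
import java.util.List;

import javax.inject.Inject;

// The import for AmbariClusterService is omitted because its package is not shown above.
public class ClusterRepairExample {

    // Wired by the container elsewhere; shown here only to make the call site concrete.
    @Inject
    private AmbariClusterService ambariClusterService;

    // Asks the service to repair every UNHEALTHY, MANUAL-recovery node in the 'worker'
    // host group of stack 42, passing removeOnly = false so the full repair flow is
    // triggered rather than a removal-only one.
    public void repairWorkerHostGroup() {
        Long stackId = 42L;
        List<String> hostGroupsToRepair = Collections.singletonList("worker");
        ambariClusterService.repairCluster(stackId, hostGroupsToRepair, false);
    }
}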

Example 20 with Stack

Use of com.sequenceiq.cloudbreak.domain.Stack in project cloudbreak by hortonworks.

In the class AmbariClusterService, the method upgrade:

@Override
public void upgrade(Long stackId, AmbariRepo ambariRepoUpgrade) {
    if (ambariRepoUpgrade != null) {
        Stack stack = stackService.getByIdWithLists(stackId);
        Cluster cluster = clusterRepository.findById(stack.getCluster().getId());
        if (cluster == null) {
            throw new BadRequestException(String.format("Cluster does not exist on stack with '%s' id.", stackId));
        }
        if (!stack.isAvailable()) {
            throw new BadRequestException(String.format("Stack '%s' is currently in '%s' state. Upgrade requests to a cluster can only be made if the underlying stack is 'AVAILABLE'.", stackId, stack.getStatus()));
        }
        if (!cluster.isAvailable()) {
            throw new BadRequestException(String.format("Cluster '%s' is currently in '%s' state. Upgrade requests to a cluster can only be made if the underlying stack is 'AVAILABLE'.", stackId, stack.getStatus()));
        }
        AmbariRepo ambariRepo = clusterComponentConfigProvider.getAmbariRepo(cluster.getId());
        if (ambariRepo == null) {
            try {
                clusterComponentConfigProvider.store(new ClusterComponent(ComponentType.AMBARI_REPO_DETAILS, new Json(ambariRepoUpgrade), stack.getCluster()));
            } catch (JsonProcessingException ignored) {
                throw new BadRequestException(String.format("Ambari repo details cannot be saved. %s", ambariRepoUpgrade));
            }
        } else {
            ClusterComponent component = clusterComponentConfigProvider.getComponent(cluster.getId(), ComponentType.AMBARI_REPO_DETAILS);
            ambariRepo.setBaseUrl(ambariRepoUpgrade.getBaseUrl());
            ambariRepo.setGpgKeyUrl(ambariRepoUpgrade.getGpgKeyUrl());
            ambariRepo.setPredefined(false);
            ambariRepo.setVersion(ambariRepoUpgrade.getVersion());
            try {
                component.setAttributes(new Json(ambariRepo));
                clusterComponentConfigProvider.store(component);
            } catch (JsonProcessingException ignored) {
                throw new BadRequestException(String.format("Ambari repo details cannot be saved. %s", ambariRepoUpgrade));
            }
        }
        try {
            flowManager.triggerClusterUpgrade(stack.getId());
        } catch (RuntimeException e) {
            throw new CloudbreakServiceException(e);
        }
    }
}
Also used: ClusterComponent (com.sequenceiq.cloudbreak.domain.ClusterComponent), CloudbreakServiceException (com.sequenceiq.cloudbreak.service.CloudbreakServiceException), Cluster (com.sequenceiq.cloudbreak.domain.Cluster), BadRequestException (com.sequenceiq.cloudbreak.controller.BadRequestException), AmbariRepo (com.sequenceiq.cloudbreak.cloud.model.AmbariRepo), BlueprintParameterJson (com.sequenceiq.cloudbreak.api.model.BlueprintParameterJson), UserNamePasswordJson (com.sequenceiq.cloudbreak.api.model.UserNamePasswordJson), HostGroupAdjustmentJson (com.sequenceiq.cloudbreak.api.model.HostGroupAdjustmentJson), Json (com.sequenceiq.cloudbreak.domain.json.Json), BlueprintInputJson (com.sequenceiq.cloudbreak.api.model.BlueprintInputJson), JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException), Stack (com.sequenceiq.cloudbreak.domain.Stack)
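
A hedged caller sketch of an Ambari repository upgrade request. The AmbariUpgradeExample class, the injected field, the version string and the repository URLs are placeholders; the AmbariRepo setters (setVersion, setBaseUrl, setGpgKeyUrl) and the upgrade(Long, AmbariRepo) signature are the ones visible in the snippet above, and a public no-arg AmbariRepo constructor is assumed.

import javax.inject.Inject;

import com.sequenceiq.cloudbreak.cloud.model.AmbariRepo;

// The import for AmbariClusterService is omitted because its package is not shown above.
public class AmbariUpgradeExample {

    // Wired by the container elsewhere; shown here only to make the call site concrete.
    @Inject
    private AmbariClusterService ambariClusterService;

    // Describes the target Ambari repository and asks the service to trigger the cluster
    // upgrade flow for stack 42. The version and URLs are illustrative placeholders.
    public void upgradeAmbariRepo() {
        AmbariRepo repoUpgrade = new AmbariRepo();
        repoUpgrade.setVersion("2.6.1.0");
        repoUpgrade.setBaseUrl("http://repo.example.com/ambari/centos7/2.x/updates/2.6.1.0");
        repoUpgrade.setGpgKeyUrl("http://repo.example.com/ambari/centos7/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins");
        ambariClusterService.upgrade(42L, repoUpgrade);
    }
}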

Aggregations

Stack (com.sequenceiq.cloudbreak.domain.Stack): 207
Cluster (com.sequenceiq.cloudbreak.domain.Cluster): 74
Test (org.junit.Test): 70
AmbariClient (com.sequenceiq.ambari.client.AmbariClient): 32
InstanceMetaData (com.sequenceiq.cloudbreak.domain.InstanceMetaData): 30
CloudbreakException (com.sequenceiq.cloudbreak.service.CloudbreakException): 26
DetailedStackStatus (com.sequenceiq.cloudbreak.api.model.DetailedStackStatus): 23
ArrayList (java.util.ArrayList): 20
List (java.util.List): 20
BadRequestException (com.sequenceiq.cloudbreak.controller.BadRequestException): 18
HostGroup (com.sequenceiq.cloudbreak.domain.HostGroup): 18
InstanceGroup (com.sequenceiq.cloudbreak.domain.InstanceGroup): 18
HashMap (java.util.HashMap): 18
HashSet (java.util.HashSet): 18
Blueprint (com.sequenceiq.cloudbreak.domain.Blueprint): 17
Map (java.util.Map): 17
Matchers.anyString (org.mockito.Matchers.anyString): 16
HostMetadata (com.sequenceiq.cloudbreak.domain.HostMetadata): 15
Set (java.util.Set): 15
CancellationException (com.sequenceiq.cloudbreak.cloud.scheduler.CancellationException): 14