Use of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks: the updateUserNamePassword method of the AmbariClusterService class.
@Override
public void updateUserNamePassword(Long stackId, UserNamePasswordJson userNamePasswordJson) {
    // Replaces or updates the Ambari cluster credential for the given stack.
    // A changed username triggers a full credential replace; a changed password
    // alone triggers a password update; an unchanged credential is rejected.
    Stack stack = stackService.get(stackId);
    Cluster cluster = stack.getCluster();
    String oldUserName = cluster.getUserName();
    String oldPassword = cluster.getPassword();
    String newUserName = userNamePasswordJson.getUserName();
    String newPassword = userNamePasswordJson.getPassword();
    if (!newUserName.equals(oldUserName)) {
        // Username change implies a full credential replace (new user + password).
        // Use the locals computed above instead of re-reading the JSON payload.
        flowManager.triggerClusterCredentialReplace(stack.getId(), newUserName, newPassword);
    } else if (!newPassword.equals(oldPassword)) {
        // Same user, different password: password-only update flow.
        flowManager.triggerClusterCredentialUpdate(stack.getId(), newPassword);
    } else {
        // Nothing changed; the API contract requires the request to alter the credential.
        throw new BadRequestException("The request may not change credential");
    }
}
Use of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks: the failureReport method of the AmbariClusterService class.
@Override
public void failureReport(Long stackId, List<String> failedNodes) {
    // Processes a list of failed host names: hosts in AUTO-recovery host groups are
    // batched into an automatic repair flow; hosts in MANUAL-recovery groups are only
    // marked UNHEALTHY and reported. Unknown hosts are rejected up front.
    Stack stack = stackService.get(stackId);
    Cluster cluster = stack.getCluster();
    Map<String, List<String>> autoRecoveryNodesMap = new HashMap<>();
    Map<String, HostMetadata> autoRecoveryHostMetadata = new HashMap<>();
    Map<String, HostMetadata> failedHostMetadata = new HashMap<>();
    for (String failedNode : failedNodes) {
        HostMetadata hostMetadata = hostMetadataRepository.findHostInClusterByName(cluster.getId(), failedNode);
        if (hostMetadata == null) {
            throw new BadRequestException("No metadata information for the node: " + failedNode);
        }
        HostGroup hostGroup = hostMetadata.getHostGroup();
        String hostGroupName = hostGroup.getName();
        // Single branch per recovery mode (the original checked AUTO twice in a row).
        if (hostGroup.getRecoveryMode() == RecoveryMode.AUTO) {
            validateRepair(stack, hostMetadata);
            List<String> nodeList = autoRecoveryNodesMap.get(hostGroupName);
            if (nodeList == null) {
                // Validate each host group's components only once, on first sighting.
                validateComponentsCategory(stack, hostGroupName);
                nodeList = new ArrayList<>();
                autoRecoveryNodesMap.put(hostGroupName, nodeList);
            }
            nodeList.add(failedNode);
            autoRecoveryHostMetadata.put(failedNode, hostMetadata);
        } else if (hostGroup.getRecoveryMode() == RecoveryMode.MANUAL) {
            failedHostMetadata.put(failedNode, hostMetadata);
        }
    }
    if (!autoRecoveryNodesMap.isEmpty()) {
        // Kick off automatic repair and mark the affected hosts as waiting for it.
        flowManager.triggerClusterRepairFlow(stackId, autoRecoveryNodesMap, false);
        String recoveryMessage = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_AUTORECOVERY_REQUESTED.code(), Collections.singletonList(autoRecoveryNodesMap));
        updateChangedHosts(cluster, autoRecoveryHostMetadata, HostMetadataState.HEALTHY, HostMetadataState.WAITING_FOR_REPAIR, recoveryMessage);
    }
    if (!failedHostMetadata.isEmpty()) {
        // MANUAL-mode hosts are only flagged unhealthy; the operator must repair them.
        String recoveryMessage = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_FAILED_NODES_REPORTED.code(), Collections.singletonList(failedHostMetadata.keySet()));
        updateChangedHosts(cluster, failedHostMetadata, HostMetadataState.HEALTHY, HostMetadataState.UNHEALTHY, recoveryMessage);
    }
}
Use of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks: the updateClusterMetadata method of the AmbariClusterService class.
@Override
@Transactional(TxType.NEVER)
public Cluster updateClusterMetadata(Long stackId) {
    // Synchronizes stored host metadata with the live host states reported by Ambari,
    // counting the hosts Ambari knows about per host group along the way.
    Stack stack = stackService.getById(stackId);
    AmbariClient ambariClient = getAmbariClient(stack);
    Map<String, Integer> groupCounters = new HashMap<>();
    Set<HostMetadata> hosts = hostMetadataRepository.findHostsInCluster(stack.getCluster().getId());
    Map<String, String> hostStatuses = ambariClient.getHostStatuses();
    for (HostMetadata host : hosts) {
        String hostName = host.getHostName();
        if (!hostStatuses.containsKey(hostName)) {
            // Ambari has no record of this host; leave its stored state untouched.
            continue;
        }
        groupCounters.merge(host.getHostGroup().getName(), 1, Integer::sum);
        boolean reportedHealthy = HostMetadataState.HEALTHY.name().equals(hostStatuses.get(hostName));
        HostMetadataState newState = reportedHealthy ? HostMetadataState.HEALTHY : HostMetadataState.UNHEALTHY;
        boolean stateChanged = updateHostMetadataByHostState(stack, hostName, newState);
        if (stateChanged && HostMetadataState.HEALTHY == newState) {
            // A host that just transitioned back to healthy is re-registered.
            updateInstanceMetadataStateToRegistered(stackId, host);
        }
    }
    hostGroupCounter(stack.getCluster().getId(), groupCounters);
    return stack.getCluster();
}
Use of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks: the repairCluster method of the AmbariClusterService class.
@Override
public void repairCluster(Long stackId, List<String> repairedHostGroups, boolean removeOnly) {
    // Collects the UNHEALTHY hosts of the requested MANUAL-recovery host groups and,
    // if any were found, triggers a manual cluster repair flow for them.
    Stack stack = stackService.get(stackId);
    Cluster cluster = stack.getCluster();
    Set<HostGroup> hostGroups = hostGroupService.getByCluster(cluster.getId());
    Map<String, List<String>> failedNodeMap = new HashMap<>();
    for (HostGroup hg : hostGroups) {
        // Only host groups explicitly requested AND configured for manual recovery qualify.
        if (repairedHostGroups.contains(hg.getName()) && hg.getRecoveryMode() == RecoveryMode.MANUAL) {
            for (HostMetadata hmd : hg.getHostMetadata()) {
                if (hmd.getHostMetadataState() == HostMetadataState.UNHEALTHY) {
                    validateRepair(stack, hmd);
                    // Lazily create the per-group list only when a failed node exists
                    // (the original allocated a list for every host group up front).
                    failedNodeMap.computeIfAbsent(hg.getName(), key -> new ArrayList<>()).add(hmd.getHostName());
                }
            }
        }
    }
    if (!failedNodeMap.isEmpty()) {
        flowManager.triggerClusterRepairFlow(stackId, failedNodeMap, removeOnly);
        String recoveryMessage = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_MANUALRECOVERY_REQUESTED.code(), Collections.singletonList(repairedHostGroups));
        LOGGER.info(recoveryMessage);
        eventService.fireCloudbreakEvent(stack.getId(), "RECOVERY", recoveryMessage);
    }
}
Use of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks: the upgrade method of the AmbariClusterService class.
@Override
public void upgrade(Long stackId, AmbariRepo ambariRepoUpgrade) {
    // Upgrades the Ambari server of the given stack's cluster: validates that both
    // stack and cluster are AVAILABLE, persists the requested Ambari repo details,
    // then triggers the upgrade flow. A null request is a no-op.
    if (ambariRepoUpgrade == null) {
        return;
    }
    Stack stack = stackService.getByIdWithLists(stackId);
    Cluster cluster = clusterRepository.findById(stack.getCluster().getId());
    if (cluster == null) {
        throw new BadRequestException(String.format("Cluster does not exist on stack with '%s' id.", stackId));
    }
    if (!stack.isAvailable()) {
        throw new BadRequestException(String.format("Stack '%s' is currently in '%s' state. Upgrade requests to a cluster can only be made if the underlying stack is 'AVAILABLE'.", stackId, stack.getStatus()));
    }
    if (!cluster.isAvailable()) {
        // Bug fix: report the CLUSTER's status here; the original echoed the stack's status.
        throw new BadRequestException(String.format("Cluster '%s' is currently in '%s' state. Upgrade requests to a cluster can only be made if the underlying stack is 'AVAILABLE'.", stackId, cluster.getStatus()));
    }
    saveAmbariRepoDetails(stack, cluster, ambariRepoUpgrade);
    try {
        flowManager.triggerClusterUpgrade(stack.getId());
    } catch (RuntimeException e) {
        throw new CloudbreakServiceException(e);
    }
}

/**
 * Persists the Ambari repo details for the upgrade: creates a new cluster component
 * when none is stored yet, otherwise updates the existing component in place.
 * The two branches previously duplicated the JsonProcessingException handling;
 * it is consolidated into a single catch here.
 */
private void saveAmbariRepoDetails(Stack stack, Cluster cluster, AmbariRepo ambariRepoUpgrade) {
    AmbariRepo ambariRepo = clusterComponentConfigProvider.getAmbariRepo(cluster.getId());
    try {
        if (ambariRepo == null) {
            clusterComponentConfigProvider.store(new ClusterComponent(ComponentType.AMBARI_REPO_DETAILS, new Json(ambariRepoUpgrade), stack.getCluster()));
        } else {
            ClusterComponent component = clusterComponentConfigProvider.getComponent(cluster.getId(), ComponentType.AMBARI_REPO_DETAILS);
            ambariRepo.setBaseUrl(ambariRepoUpgrade.getBaseUrl());
            ambariRepo.setGpgKeyUrl(ambariRepoUpgrade.getGpgKeyUrl());
            ambariRepo.setPredefined(false);
            ambariRepo.setVersion(ambariRepoUpgrade.getVersion());
            component.setAttributes(new Json(ambariRepo));
            clusterComponentConfigProvider.store(component);
        }
    } catch (JsonProcessingException ignored) {
        // Serialization of the repo details failed; surface as a client error.
        throw new BadRequestException(String.format("Ambari repo details cannot be saved. %s", ambariRepoUpgrade));
    }
}
Aggregations