Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The following snippet is from class ClusterUpscaleService, method stopComponents.
/**
 * Stops the given components on a single host of the stack's cluster.
 *
 * @param stackId    id of the stack whose cluster is affected
 * @param components component map forwarded untouched to the cluster connector
 *                   (keys/values' semantics are defined by the connector — not visible here)
 * @param hostname   host on which the components are stopped
 * @throws CloudbreakException if the cluster connector fails to stop the components
 */
public void stopComponents(Long stackId, Map<String, String> components, String hostname) throws CloudbreakException {
    Stack loadedStack = stackService.getByIdWithListsInTransaction(stackId);
    LOGGER.info("Start stop components in ambari on host {}", hostname);
    getClusterConnector(loadedStack).stopComponents(components, hostname);
}
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The following snippet is from class UpgradeDatalakeFlowEventChainFactory, method addUpgradeValidationToChain.
/**
 * Appends a cluster-upgrade validation trigger to the flow event chain,
 * but only when upgrade validation is enabled for this factory.
 *
 * @param event          the upgrade trigger carrying the resource id and target image id
 * @param flowEventChain chain the validation trigger event is appended to
 */
private void addUpgradeValidationToChain(ClusterUpgradeTriggerEvent event, Queue<Selectable> flowEventChain) {
    if (!upgradeValidationEnabled) {
        return;
    }
    Stack currentStack = stackService.getById(event.getResourceId());
    // Whether components are locked determines how the validation step behaves downstream.
    boolean componentsLocked = lockedComponentService.isComponentsLocked(currentStack, event.getImageId());
    flowEventChain.add(new ClusterUpgradeValidationTriggerEvent(event.getResourceId(), event.accepted(), event.getImageId(), componentsLocked));
}
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The following snippet is from class PillarConfigUpdateService, method doConfigUpdate.
/**
 * Pushes updated pillar configuration to the cluster belonging to the given stack.
 * Updates the stack status, fires a progress event, then delegates the actual
 * config update to the cluster host service runner.
 *
 * @param stackId id of the stack whose cluster configuration is refreshed
 * @throws NotFoundException if no cluster exists for the stack's cluster id
 */
public void doConfigUpdate(Long stackId) {
    // NOTE(review): status is set to BOOTSTRAPPING_MACHINES even though this is a
    // pillar config update — presumably intentional reuse of that phase; confirm.
    stackUpdater.updateStackStatus(stackId, DetailedStackStatus.BOOTSTRAPPING_MACHINES);
    flowMessageService.fireEventAndLog(stackId, UPDATE_IN_PROGRESS.name(), CLUSTER_PILLAR_CONFIG_UPDATE_STARTED);
    Stack stackWithCluster = stackService.getByIdWithClusterInTransaction(stackId);
    Long clusterIdentifier = stackWithCluster.getCluster().getId();
    Cluster loadedCluster = clusterService.findOneWithLists(clusterIdentifier)
            .orElseThrow(NotFoundException.notFound("Cluster", clusterIdentifier));
    clusterHostServiceRunner.updateClusterConfigs(stackWithCluster, loadedCluster);
}
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The following snippet is from class ClusterDownscaleService, method updateMetadataStatusToFailed.
/**
 * Marks the instances named in a failed decommission result as DECOMMISSION_FAILED
 * and fires a scaling-failed flow event listing the affected hosts.
 * Does nothing when the payload carries no error phase.
 *
 * @param payload decommission result holding the resource id, failed host names and status reason
 */
public void updateMetadataStatusToFailed(DecommissionResult payload) {
    if (payload.getErrorPhase() == null) {
        return;
    }
    Stack stack = stackService.getByIdWithListsInTransaction(payload.getResourceId());
    // Hosts without matching instance metadata are silently skipped.
    payload.getHostNames().forEach(hostName ->
            instanceMetaDataService.findByHostname(stack.getId(), hostName)
                    .ifPresent(metaData ->
                            instanceMetaDataService.updateInstanceStatus(metaData, InstanceStatus.DECOMMISSION_FAILED, payload.getStatusReason())));
    String errorDetails = String.format("The following hosts are '%s': %s",
            InstanceStatus.DECOMMISSION_FAILED, String.join(", ", payload.getHostNames()));
    flowMessageService.fireEventAndLog(payload.getResourceId(), UPDATE_FAILED.name(), CLUSTER_SCALING_FAILED, "removed from", errorDetails);
}
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The following snippet is from class StopStartDownscaleActions, method decommissionViaCmFailedAction.
/**
 * Failure-state handler for the stop/start downscale "decommission via CM" step.
 * Resolves the FQDNs of the instances that were meant to be decommissioned,
 * reports the failure to the flow service, then emits the downscale failure event.
 */
@Bean(name = "STOPSTART_DOWNSCALE_DECOMMISSION_VIA_CM_FAILED_STATE")
public Action<?, ?> decommissionViaCmFailedAction() {
    return new AbstractStopStartDownscaleActions<>(StopStartDownscaleDecommissionViaCMResult.class) {
        @Override
        protected void doExecute(StopStartDownscaleContext context, StopStartDownscaleDecommissionViaCMResult payload, Map<Object, Object> variables) throws Exception {
            LOGGER.warn("Failure during the decommissionViaCm step");
            // TODO CB-14929. Should the nodes be put into an ORCHESTRATOR_FAILED state? What are the manual recovery steps from this state.
            Set<String> affectedFqdns = resolveFqdns(payload.getRequest().getInstanceIdsToDecommission(), context.getStack());
            stopStartDownscaleFlowService.decommissionViaCmFailed(payload.getResourceId(), affectedFqdns);
            sendEvent(context, STOPSTART_DOWNSCALE_FAILURE_EVENT.event(), new StackFailureEvent(payload.getResourceId(), payload.getErrorDetails()));
        }

        // Maps each private id to its instance's discovery FQDN; ids with no
        // metadata match or an empty/null FQDN are dropped from the result.
        private Set<String> resolveFqdns(Set<Long> privateIds, Stack stack) {
            return privateIds.stream()
                    .map(privateId -> stackService.getInstanceMetadata(stack.getInstanceMetaDataAsList(), privateId))
                    .map(metadataOpt -> metadataOpt.map(InstanceMetaData::getDiscoveryFQDN).orElse(null))
                    .filter(StringUtils::isNotEmpty)
                    .collect(Collectors.toSet());
        }
    };
}
Aggregations