Use of com.sequenceiq.cloudbreak.controller.BadRequestException in the project cloudbreak by Hortonworks.
Example: the AmbariClusterService class, method start.
/**
 * Requests a cluster start for the given stack, or reports why the request is a no-op / invalid.
 *
 * Behavior, in order:
 * - Stack start already in progress: fire a START_REQUESTED event and mark the cluster START_REQUESTED.
 * - Cluster already available: log and fire an informational "start ignored" event.
 * - Cluster neither ready for start nor in a failed-start state: reject with BadRequestException.
 * - Stack not available and cluster not in a failed-start state: reject with BadRequestException.
 * - Otherwise: mark the cluster START_REQUESTED and trigger the start flow.
 *
 * @param stack   the stack whose cluster should be started
 * @param cluster the cluster to start
 * @throws BadRequestException when the cluster or stack state does not permit a start
 */
private void start(Stack stack, Cluster cluster) {
    // Stack start already underway: just record the cluster-start request and notify.
    if (stack.isStartInProgress()) {
        String startRequestedMsg = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_START_REQUESTED.code());
        eventService.fireCloudbreakEvent(stack.getId(), START_REQUESTED.name(), startRequestedMsg);
        updateClusterStatusByStackId(stack.getId(), START_REQUESTED);
        return;
    }
    if (cluster.isAvailable()) {
        // Nothing to do; surface an informational event instead of failing.
        String ignoredMsg = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_START_IGNORED.code());
        LOGGER.info(ignoredMsg);
        eventService.fireCloudbreakEvent(stack.getId(), stack.getStatus().name(), ignoredMsg);
    } else if (!cluster.isClusterReadyForStart() && !cluster.isStartFailed()) {
        throw new BadRequestException(String.format("Cannot update the status of cluster '%s' to STARTED, because it isn't in STOPPED state.", cluster.getId()));
    } else if (!stack.isAvailable() && !cluster.isStartFailed()) {
        throw new BadRequestException(String.format("Cannot update the status of cluster '%s' to STARTED, because the stack is not AVAILABLE", cluster.getId()));
    } else {
        // Valid start request: record it and kick off the start flow.
        updateClusterStatusByStackId(stack.getId(), START_REQUESTED);
        flowManager.triggerClusterStart(stack.getId());
    }
}
Use of com.sequenceiq.cloudbreak.controller.BadRequestException in the project cloudbreak by Hortonworks.
Example: the AmbariClusterService class, method recreate.
/**
 * Recreates the cluster on an existing stack from the given blueprint and host group layout.
 *
 * Flow: validate inputs, refresh Kerberos credentials for secure clusters, reject external
 * Ambari databases, optionally validate the blueprint against the stack, clear container
 * components for container orchestrators, persist host groups, prepare the cluster entity,
 * and trigger the install flow.
 *
 * @param stackId           id of the stack the cluster lives on
 * @param blueprintId       blueprint to recreate the cluster from; must not be null
 * @param hostGroups        host group assignments; must not be null
 * @param validateBlueprint when true, the blueprint is validated against the stack's instance groups
 * @param stackRepoDetails  HDP repo details to apply during preparation
 * @param kerberosPassword  Kerberos password; required when the cluster is secure
 * @param kerberosPrincipal Kerberos principal; required when the cluster is secure
 * @return the recreated cluster as attached to the stack
 * @throws BadRequestException        on null inputs, missing Kerberos credentials, or an external Ambari DB
 * @throws CloudbreakServiceException when triggering the cluster install fails
 */
@Override
public Cluster recreate(Long stackId, Long blueprintId, Set<HostGroup> hostGroups, boolean validateBlueprint, StackRepoDetails stackRepoDetails, String kerberosPassword, String kerberosPrincipal) {
    if (blueprintId == null || hostGroups == null) {
        throw new BadRequestException("Blueprint id and hostGroup assignments can not be null.");
    }
    Stack stack = stackService.getByIdWithLists(stackId);
    Cluster cluster = getCluster(stack);
    // Secure clusters need fresh Kerberos credentials before the recreate can proceed.
    // NOTE(review): the guard checks `cluster != null` but then reads `stack.getCluster()`;
    // presumably these are the same entity — confirm, otherwise this can NPE on a detached cluster.
    if (cluster != null && stack.getCluster().isSecure()) {
        // Collect which of the two credential parts are blank so the error names them all at once.
        List<String> missing = Stream.of(Pair.of("password", kerberosPassword), Pair.of("principal", kerberosPrincipal)).filter(p -> !StringUtils.hasLength(p.getRight())).map(Pair::getLeft).collect(Collectors.toList());
        if (!missing.isEmpty()) {
            throw new BadRequestException(String.format("Missing Kerberos credential detail(s): %s", String.join(", ", missing)));
        }
        KerberosConfig kerberosConfig = cluster.getKerberosConfig();
        kerberosConfig.setPassword(kerberosPassword);
        kerberosConfig.setPrincipal(kerberosPrincipal);
        kerberosConfigRepository.save(kerberosConfig);
    }
    Blueprint blueprint = blueprintService.get(blueprintId);
    // Recreate wipes the Ambari schema, which is only automated for the embedded DB.
    if (!withEmbeddedAmbariDB(cluster)) {
        throw new BadRequestException("Ambari doesn't support resetting external DB automatically. To reset Ambari Server schema you must first drop " + "and then create it using DDL scripts from /var/lib/ambari-server/resources");
    }
    if (validateBlueprint) {
        blueprintValidator.validateBlueprintForStack(cluster, blueprint, hostGroups, stack.getInstanceGroups());
    }
    Boolean containerOrchestrator;
    try {
        containerOrchestrator = orchestratorTypeResolver.resolveType(stack.getOrchestrator()).containerOrchestrator();
    } catch (CloudbreakException ignored) {
        // Unresolvable orchestrator type is treated as "not container based" (best-effort default).
        containerOrchestrator = false;
    }
    if (containerOrchestrator) {
        // Container components must be dropped before recreate; re-read the cluster afterwards
        // so the local reference reflects the deletion.
        clusterTerminationService.deleteClusterComponents(cluster.getId());
        cluster = clusterRepository.findById(stack.getCluster().getId());
    }
    hostGroups = hostGroupService.saveOrUpdateWithMetadata(hostGroups, cluster);
    cluster = prepareCluster(hostGroups, stackRepoDetails, blueprint, stack, cluster);
    try {
        triggerClusterInstall(stack, cluster);
    } catch (CloudbreakException e) {
        throw new CloudbreakServiceException(e);
    }
    return stack.getCluster();
}
Use of com.sequenceiq.cloudbreak.controller.BadRequestException in the project cloudbreak by Hortonworks.
Example: the AmbariClusterService class, method create.
/**
 * Creates and persists a new cluster for the given stack and, when the stack is already
 * available, triggers the cluster install flow.
 *
 * Persists, in order: host group constraints, the optional filesystem config, the optional
 * Kerberos config, gateway sign keys, the cluster itself, and its components. Timing of each
 * persistence step is logged at INFO level.
 *
 * @param user       the requesting identity; becomes the cluster owner/account
 * @param stack      the stack to attach the cluster to; must not already have a cluster
 * @param cluster    the cluster entity to persist
 * @param components cluster components to store alongside the cluster
 * @return the persisted cluster
 * @throws BadRequestException        when the stack already has a cluster, the stack creation
 *                                    failed, or a data-integrity violation occurs on save
 * @throws DuplicateKeyValueException when a cluster with the same name exists in the account
 */
@Override
@Transactional(TxType.NEVER)
public Cluster create(IdentityUser user, Stack stack, Cluster cluster, List<ClusterComponent> components) {
    LOGGER.info("Cluster requested [BlueprintId: {}]", cluster.getBlueprint().getId());
    String stackName = stack.getName();
    if (stack.getCluster() != null) {
        throw new BadRequestException(String.format("A cluster is already created on this stack! [cluster: '%s']", stack.getCluster().getName()));
    }
    long start = System.currentTimeMillis();
    if (clusterRepository.findByNameInAccount(cluster.getName(), user.getAccount()) != null) {
        throw new DuplicateKeyValueException(APIResourceType.CLUSTER, cluster.getName());
    }
    LOGGER.info("Cluster name collision check took {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    if (Status.CREATE_FAILED.equals(stack.getStatus())) {
        throw new BadRequestException("Stack creation failed, cannot create cluster.");
    }
    start = System.currentTimeMillis();
    for (HostGroup hostGroup : cluster.getHostGroups()) {
        constraintRepository.save(hostGroup.getConstraint());
    }
    // Fixed typo: "constrainst" -> "constraints".
    LOGGER.info("Host group constraints saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    start = System.currentTimeMillis();
    if (cluster.getFileSystem() != null) {
        fileSystemRepository.save(cluster.getFileSystem());
    }
    LOGGER.info("Filesystem config saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    if (cluster.getKerberosConfig() != null) {
        kerberosConfigRepository.save(cluster.getKerberosConfig());
    }
    // Wire ownership and the bidirectional stack<->cluster association before saving.
    cluster.setStack(stack);
    cluster.setOwner(user.getUserId());
    cluster.setAccount(user.getAccount());
    stack.setCluster(cluster);
    start = System.currentTimeMillis();
    generateSignKeys(cluster.getGateway());
    LOGGER.info("Sign key generated in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    try {
        start = System.currentTimeMillis();
        cluster = clusterRepository.save(cluster);
        LOGGER.info("Cluster object saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
        clusterComponentConfigProvider.store(components, cluster);
    } catch (DataIntegrityViolationException ex) {
        // NOTE(review): the original exception is dropped here; if BadRequestException has a
        // (String, Throwable) constructor, the cause should be chained — confirm and fix.
        String msg = String.format("Error with resource [%s], error: [%s]", APIResourceType.CLUSTER, getProperSqlErrorMessage(ex));
        throw new BadRequestException(msg);
    }
    if (stack.isAvailable()) {
        flowManager.triggerClusterInstall(stack.getId());
        // Track cluster (and stack, if not yet tracked) in the in-memory poll-group store so
        // status polling picks the new install up immediately.
        InMemoryStateStore.putCluster(cluster.getId(), statusToPollGroupConverter.convert(cluster.getStatus()));
        if (InMemoryStateStore.getStack(stack.getId()) == null) {
            InMemoryStateStore.putStack(stack.getId(), statusToPollGroupConverter.convert(stack.getStatus()));
        }
    }
    return cluster;
}
Use of com.sequenceiq.cloudbreak.controller.BadRequestException in the project cloudbreak by Hortonworks.
Example: the AmbariClusterService class, method stop.
/**
 * Requests a cluster stop for the given stack, or reports why the request is a no-op / invalid.
 *
 * Behavior, in order:
 * - Cluster already stopped: log and fire an informational "stop ignored" event.
 * - Infrastructure not stoppable: reject with BadRequestException carrying the restriction reason.
 * - Cluster neither ready for stop nor in a failed-stop state: reject with BadRequestException.
 * - Stack neither ready for stop nor in a failed-stop state: reject with BadRequestException.
 * - Cluster available or in a failed-stop state: mark STOP_REQUESTED and trigger the stop flow.
 *
 * @param stack   the stack whose cluster should be stopped
 * @param cluster the cluster to stop
 * @throws BadRequestException when the cluster or stack state does not permit a stop
 */
private void stop(Stack stack, Cluster cluster) {
    StopRestrictionReason reason = stack.isInfrastructureStoppable();
    if (cluster.isStopped()) {
        // Nothing to do; surface an informational event instead of failing.
        String statusDesc = cloudbreakMessagesService.getMessage(Msg.AMBARI_CLUSTER_STOP_IGNORED.code());
        LOGGER.info(statusDesc);
        eventService.fireCloudbreakEvent(stack.getId(), stack.getStatus().name(), statusDesc);
    } else if (reason != StopRestrictionReason.NONE) {
        throw new BadRequestException(String.format("Cannot stop a cluster '%s'. Reason: %s", cluster.getId(), reason.getReason()));
    } else if (!cluster.isClusterReadyForStop() && !cluster.isStopFailed()) {
        throw new BadRequestException(String.format("Cannot update the status of cluster '%s' to STOPPED, because it isn't in AVAILABLE state.", cluster.getId()));
    } else if (!stack.isStackReadyForStop() && !stack.isStopFailed()) {
        // Bug fix: message previously said "to STARTED" (copy-paste from the start path);
        // this is the stop path, so the target status is STOPPED.
        throw new BadRequestException(String.format("Cannot update the status of cluster '%s' to STOPPED, because the stack is not AVAILABLE", cluster.getId()));
    } else if (cluster.isAvailable() || cluster.isStopFailed()) {
        updateClusterStatusByStackId(stack.getId(), STOP_REQUESTED);
        flowManager.triggerClusterStop(stack.getId());
    }
}
Use of com.sequenceiq.cloudbreak.controller.BadRequestException in the project cloudbreak by Hortonworks.
Example: the AmbariClusterService class, method createHDPRepoComponent.
/**
 * Creates or updates the HDP repo details component of the stack's cluster.
 *
 * When no HDP repo component exists yet, a new one is stored from the update; otherwise the
 * existing component's fields are overwritten from the update and re-stored. A null update is
 * a no-op.
 *
 * @param stackRepoDetailsUpdate new repo details to apply; null means "no change requested"
 * @param stack                  the stack whose cluster's repo component is updated
 * @throws BadRequestException when the repo details cannot be serialized to JSON
 */
private void createHDPRepoComponent(StackRepoDetails stackRepoDetailsUpdate, Stack stack) {
    if (stackRepoDetailsUpdate == null) {
        return;
    }
    StackRepoDetails stackRepoDetails = clusterComponentConfigProvider.getHDPRepo(stack.getCluster().getId());
    if (stackRepoDetails == null) {
        // No existing component: store the update as a brand-new HDP repo component.
        ClusterComponent clusterComp = new ClusterComponent(ComponentType.HDP_REPO_DETAILS, repoDetailsToJson(stackRepoDetailsUpdate, stackRepoDetailsUpdate), stack.getCluster());
        clusterComponentConfigProvider.store(clusterComp);
    } else {
        // Existing component: copy the updatable fields onto the stored details and re-store.
        ClusterComponent component = clusterComponentConfigProvider.getComponent(stack.getCluster().getId(), ComponentType.HDP_REPO_DETAILS);
        stackRepoDetails.setHdpVersion(stackRepoDetailsUpdate.getHdpVersion());
        stackRepoDetails.setVerify(stackRepoDetailsUpdate.isVerify());
        stackRepoDetails.setStack(stackRepoDetailsUpdate.getStack());
        stackRepoDetails.setUtil(stackRepoDetailsUpdate.getUtil());
        stackRepoDetails.setEnableGplRepo(stackRepoDetailsUpdate.isEnableGplRepo());
        stackRepoDetails.setKnox(stackRepoDetailsUpdate.getKnox());
        component.setAttributes(repoDetailsToJson(stackRepoDetails, stackRepoDetailsUpdate));
        clusterComponentConfigProvider.store(component);
    }
}

/**
 * Serializes repo details to JSON, translating serialization failure into the user-facing
 * BadRequestException previously duplicated in both branches of createHDPRepoComponent.
 *
 * @param details        the repo details to serialize
 * @param reportedUpdate the originally requested update, echoed in the error message
 */
private Json repoDetailsToJson(StackRepoDetails details, StackRepoDetails reportedUpdate) {
    try {
        return new Json(details);
    } catch (JsonProcessingException ignored) {
        throw new BadRequestException(String.format("HDP Repo parameters cannot be converted. %s", reportedUpdate));
    }
}
Aggregations