Usage of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by Hortonworks.
Class AmbariClusterService, method create:
@Override
@Transactional(TxType.NEVER)
public Cluster create(IdentityUser user, Stack stack, Cluster cluster, List<ClusterComponent> components) {
    // Creates and persists a new cluster on the given stack, wires ownership
    // metadata, and triggers the cluster-install flow if the stack is already
    // available. Declared TxType.NEVER, so each repository save commits on its
    // own rather than inside one enclosing transaction.
    //
    // Throws BadRequestException when the stack already has a cluster, when the
    // stack previously failed to create, or when the save violates a DB
    // constraint; throws DuplicateKeyValueException on a cluster-name collision
    // within the account.
    LOGGER.info("Cluster requested [BlueprintId: {}]", cluster.getBlueprint().getId());
    String stackName = stack.getName();
    if (stack.getCluster() != null) {
        throw new BadRequestException(String.format("A cluster is already created on this stack! [cluster: '%s']", stack.getCluster().getName()));
    }
    long start = System.currentTimeMillis();
    // Cluster names must be unique per account.
    if (clusterRepository.findByNameInAccount(cluster.getName(), user.getAccount()) != null) {
        throw new DuplicateKeyValueException(APIResourceType.CLUSTER, cluster.getName());
    }
    LOGGER.info("Cluster name collision check took {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    if (Status.CREATE_FAILED.equals(stack.getStatus())) {
        throw new BadRequestException("Stack creation failed, cannot create cluster.");
    }
    start = System.currentTimeMillis();
    // Constraints are owned by host groups and must be persisted before the
    // cluster (and its host groups) are saved below.
    for (HostGroup hostGroup : cluster.getHostGroups()) {
        constraintRepository.save(hostGroup.getConstraint());
    }
    // FIX: corrected typo "constrainst" -> "constraints" in the log message.
    LOGGER.info("Host group constraints saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    start = System.currentTimeMillis();
    if (cluster.getFileSystem() != null) {
        fileSystemRepository.save(cluster.getFileSystem());
    }
    LOGGER.info("Filesystem config saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    if (cluster.getKerberosConfig() != null) {
        kerberosConfigRepository.save(cluster.getKerberosConfig());
    }
    // Bind the cluster to the stack and to the requesting user/account.
    cluster.setStack(stack);
    cluster.setOwner(user.getUserId());
    cluster.setAccount(user.getAccount());
    stack.setCluster(cluster);
    start = System.currentTimeMillis();
    generateSignKeys(cluster.getGateway());
    LOGGER.info("Sign key generated in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    try {
        start = System.currentTimeMillis();
        cluster = clusterRepository.save(cluster);
        LOGGER.info("Cluster object saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
        clusterComponentConfigProvider.store(components, cluster);
    } catch (DataIntegrityViolationException ex) {
        // NOTE(review): the original cause is dropped here; if BadRequestException
        // has a (String, Throwable) constructor, chaining `ex` would be preferable.
        String msg = String.format("Error with resource [%s], error: [%s]", APIResourceType.CLUSTER, getProperSqlErrorMessage(ex));
        throw new BadRequestException(msg);
    }
    if (stack.isAvailable()) {
        flowManager.triggerClusterInstall(stack.getId());
        // Register poll-group state in memory so pollers can track this cluster/stack.
        InMemoryStateStore.putCluster(cluster.getId(), statusToPollGroupConverter.convert(cluster.getStatus()));
        if (InMemoryStateStore.getStack(stack.getId()) == null) {
            InMemoryStateStore.putStack(stack.getId(), statusToPollGroupConverter.convert(stack.getStatus()));
        }
    }
    return cluster;
}
Usage of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by Hortonworks.
Class AmbariClusterService, method validateRequest:
/**
 * Validates a host group scaling request against the stack's blueprint and
 * current host state.
 *
 * @return {@code true} when the request is a downscale (negative adjustment)
 * @throws BadRequestException when the adjustment is zero, or when an upscale
 *         is combined with {@code withStackUpdate}
 */
private boolean validateRequest(Stack stack, HostGroupAdjustmentJson hostGroupAdjustment) {
    HostGroup hostGroup = getHostGroup(stack, hostGroupAdjustment);
    int adjustment = hostGroupAdjustment.getScalingAdjustment();
    if (adjustment == 0) {
        throw new BadRequestException("No scaling adjustments specified. Nothing to do.");
    }
    boolean scalingDown = adjustment < 0;
    blueprintValidator.validateHostGroupScalingRequest(stack.getCluster().getBlueprint(), hostGroup, adjustment);
    if (scalingDown || hostGroup.getConstraint().getInstanceGroup() == null) {
        // Downscale, or no backing instance group: validate against registered hosts.
        validateRegisteredHosts(stack, hostGroupAdjustment);
        if (hostGroupAdjustment.getWithStackUpdate() && hostGroupAdjustment.getScalingAdjustment() > 0) {
            throw new BadRequestException("ScalingAdjustment has to be decommission if you define withStackUpdate = 'true'.");
        }
    } else {
        // Upscale into an instance group: there must be enough unused hosts.
        validateUnusedHosts(hostGroup.getConstraint().getInstanceGroup(), adjustment);
    }
    return scalingDown;
}
Usage of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by Hortonworks.
Class AmbariClusterService, method updateHostMetadata:
@Override
public void updateHostMetadata(Long clusterId, Map<String, List<String>> hostsPerHostGroup, HostMetadataState hostMetadataState) {
    // For each host group in the map, registers a HostMetadata entry (with the
    // given state) for every listed host that the group does not already know
    // about, then persists the group. Unknown group names are skipped silently.
    hostsPerHostGroup.forEach((hostGroupName, hostNames) -> {
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(clusterId, hostGroupName);
        if (hostGroup == null) {
            // Group name not present on this cluster; nothing to update.
            return;
        }
        Set<String> knownHostNames = hostMetadataRepository.findEmptyHostsInHostGroup(hostGroup.getId()).stream()
                .map(HostMetadata::getHostName)
                .collect(Collectors.toSet());
        for (String hostName : hostNames) {
            if (knownHostNames.contains(hostName)) {
                continue;
            }
            HostMetadata metadata = new HostMetadata();
            metadata.setHostName(hostName);
            metadata.setHostGroup(hostGroup);
            metadata.setHostMetadataState(hostMetadataState);
            hostGroup.getHostMetadata().add(metadata);
        }
        hostGroupService.save(hostGroup);
    });
}
Usage of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by Hortonworks.
Class ClusterDecorator, method convertHostGroupsFromJson:
/**
 * Converts each incoming host group request into a {@link HostGroup} entity,
 * binds it to the given cluster, and runs it through the host group decorator.
 *
 * @return the set of fully decorated host groups
 */
private Set<HostGroup> convertHostGroupsFromJson(Stack stack, IdentityUser user, Cluster cluster, Iterable<HostGroupRequest> hostGroupsJsons) {
    Set<HostGroup> result = new HashSet<>();
    for (HostGroupRequest request : hostGroupsJsons) {
        HostGroup converted = conversionService.convert(request, HostGroup.class);
        converted.setCluster(cluster);
        result.add(hostGroupDecorator.decorate(converted, request, user, stack.getId(), true, stack.isPublicInAccount()));
    }
    return result;
}
Usage of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by Hortonworks.
Class HostGroupDecorator, method getHostGroup:
// Resolves the host group for a reinstall request based on which constraint
// field is populated: an instance group name routes to
// getHostGroupByInstanceGroupName; a constraint template name persists the
// constraint onto the subject directly. Exactly one of the two must be set.
// Throws BadRequestException when the constraint is missing or both names are empty.
private HostGroup getHostGroup(Long stackId, Constraint constraint, ConstraintJson constraintJson, HostGroup subject, IdentityUser user) {
if (constraintJson == null) {
throw new BadRequestException("The constraint field must be set in the reinstall request!");
}
HostGroup result = subject;
String instanceGroupName = constraintJson.getInstanceGroupName();
String constraintTemplateName = constraintJson.getConstraintTemplateName();
Cluster cluster = clusterService.retrieveClusterByStackId(stackId);
Constraint decoratedConstraint = decorateConstraint(stackId, user, constraint, instanceGroupName, constraintTemplateName);
if (!isEmpty(instanceGroupName)) {
result = getHostGroupByInstanceGroupName(decoratedConstraint, subject, cluster, instanceGroupName);
} else if (!isEmpty(constraintTemplateName)) {
// NOTE(review): this branch saves the raw `constraint`, not `decoratedConstraint`.
// That is only equivalent if decorateConstraint mutates its argument in place —
// verify against decorateConstraint's implementation.
subject.setConstraint(constraintRepository.save(constraint));
} else {
throw new BadRequestException("The constraint field must contain the 'constraintTemplateName' or 'instanceGroupName' parameter!");
}
// NOTE(review): `cluster` is only used in the instanceGroupName branch, but it is
// fetched unconditionally; presumably retrieveClusterByStackId is side-effect free.
return result;
}
Aggregations