Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The saveSharedRangerService method of the ClusterHostServiceRunner class:
private void saveSharedRangerService(Stack stack, Map<String, SaltPillarProperties> servicePillar) {
    Long datalakeId = stack.getDatalakeId();
    if (datalakeId != null) {
        Stack dataLakeStack = stackRepository.findOne(datalakeId);
        Cluster dataLakeCluster = dataLakeStack.getCluster();
        Set<String> groupNames = blueprintProcessorFactory.get(dataLakeCluster.getBlueprint().getBlueprintText()).getHostGroupsWithComponent("RANGER_ADMIN");
        List<HostGroup> groups = dataLakeCluster.getHostGroups().stream().filter(hg -> groupNames.contains(hg.getName())).collect(Collectors.toList());
        Set<String> hostNames = new HashSet<>();
        groups.forEach(hg -> hostNames.addAll(hg.getHostMetadata().stream().map(HostMetadata::getHostName).collect(Collectors.toList())));
        Map<String, Object> rangerMap = new HashMap<>();
        rangerMap.put("servers", hostNames);
        rangerMap.put("port", "6080");
        servicePillar.put("datalake-services", new SaltPillarProperties("/datalake/init.sls", singletonMap("datalake-services", singletonMap("ranger", rangerMap))));
    }
}
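For reference, the snippet below is a minimal, dependency-free sketch (hypothetical host names, plain JDK types only) of the nested map the method builds: a "datalake-services" entry that carries the Ranger admin servers and port, which SaltPillarProperties then targets at /datalake/init.sls.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import static java.util.Collections.singletonMap;

public class RangerPillarShapeDemo {
    public static void main(String[] args) {
        // Hypothetical Ranger admin hosts; in the real method these come from the
        // data lake cluster's host groups that carry the RANGER_ADMIN component.
        Set<String> hostNames = Set.of("ranger-admin-0.example.com", "ranger-admin-1.example.com");

        Map<String, Object> rangerMap = new HashMap<>();
        rangerMap.put("servers", hostNames);
        rangerMap.put("port", "6080");

        // The same nesting that ends up under the "datalake-services" pillar key.
        Map<String, Object> pillar = singletonMap("datalake-services", singletonMap("ranger", rangerMap));
        System.out.println(pillar);
    }
}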
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The accept method of the ClusterSyncHandler class:
@Override
public void accept(Event<ClusterSyncRequest> event) {
    ClusterSyncRequest request = event.getData();
    ClusterSyncResult result;
    try {
        Stack stack = stackService.getByIdWithLists(request.getStackId());
        String proxyIp = stackUtil.extractAmbariIp(stack);
        String contextPath = stack.getCluster().getGateway().getPath();
        proxyRegistrator.register(stack.getName(), contextPath, proxyIp);
        Cluster cluster = clusterService.retrieveClusterByStackId(request.getStackId());
        ambariClusterStatusUpdater.updateClusterStatus(stack, cluster);
        result = new ClusterSyncResult(request);
    } catch (Exception e) {
        result = new ClusterSyncResult(e.getMessage(), e, request);
    }
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
}
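The handler follows a reply-always pattern: do the work inside a try block, wrap either success or the caught exception in a ClusterSyncResult, and always notify the event bus on the result's selector. The sketch below illustrates that shape with plain JDK types; the class names and selector strings are hypothetical, and the real handler uses reactor's Event/EventBus API.

import java.util.function.BiConsumer;

// A minimal, framework-free sketch of the handler's shape; all names are hypothetical.
public class SyncHandlerSketch {

    record SyncRequest(long stackId) { }

    record SyncResult(SyncRequest request, Exception error) {
        String selector() {
            return error == null ? "CLUSTERSYNCRESULT" : "CLUSTERSYNCRESULT_ERROR";
        }
    }

    private final BiConsumer<String, SyncResult> eventBus;

    SyncHandlerSketch(BiConsumer<String, SyncResult> eventBus) {
        this.eventBus = eventBus;
    }

    void accept(SyncRequest request) {
        SyncResult result;
        try {
            // ... look up the stack, refresh the proxy registration, update the cluster status ...
            result = new SyncResult(request, null);
        } catch (Exception e) {
            result = new SyncResult(request, e);
        }
        // The reply is sent on the result's selector whether the sync succeeded or failed.
        eventBus.accept(result.selector(), result);
    }

    public static void main(String[] args) {
        new SyncHandlerSketch((selector, result) -> System.out.println(selector + " -> " + result))
                .accept(new SyncRequest(1L));
    }
}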
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The deleteDependencies method of the DependecyDeletionService class:
public void deleteDependencies(Stack stack) {
    deleteNetwork(stack.getNetwork());
    deleteCredential(stack.getCredential());
    for (InstanceGroup instanceGroup : stack.getInstanceGroups()) {
        deleteSecurityGroup(instanceGroup.getSecurityGroup());
        deleteTemplate(instanceGroup.getTemplate());
    }
    if (stack.getCluster() != null) {
        Cluster cluster = stack.getCluster();
        deleteBlueprint(cluster.getBlueprint());
        Set<HostGroup> hostGroupsInCluster = hostGroupRepository.findHostGroupsInCluster(cluster.getId());
        for (HostGroup hostGroup : hostGroupsInCluster) {
            hostGroup.getRecipes().forEach(this::deleteRecipe);
        }
    }
}
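A minimal sketch of the same deletion order with simplified stand-in types: stack-level dependencies first, then each instance group's security group and template, then the cluster-only resources guarded by the null check, since a stack can exist without a cluster. All types and names below are illustrative only, not the Cloudbreak domain classes.

import java.util.List;
import java.util.Set;

public class DependencyDeletionSketch {

    record InstanceGroup(String securityGroup, String template) { }
    record HostGroup(List<String> recipes) { }
    record Cluster(String blueprint, Set<HostGroup> hostGroups) { }
    record Stack(String network, String credential, Set<InstanceGroup> instanceGroups, Cluster cluster) { }

    static void deleteDependencies(Stack stack) {
        delete("network", stack.network());
        delete("credential", stack.credential());
        for (InstanceGroup ig : stack.instanceGroups()) {
            delete("security group", ig.securityGroup());
            delete("template", ig.template());
        }
        // Cluster-level dependencies are only touched when a cluster was actually installed.
        if (stack.cluster() != null) {
            delete("blueprint", stack.cluster().blueprint());
            for (HostGroup hg : stack.cluster().hostGroups()) {
                hg.recipes().forEach(r -> delete("recipe", r));
            }
        }
    }

    static void delete(String kind, String name) {
        System.out.println("deleting " + kind + ": " + name);
    }

    public static void main(String[] args) {
        Cluster cluster = new Cluster("hdp-blueprint", Set.of(new HostGroup(List.of("pre-install-recipe"))));
        deleteDependencies(new Stack("net-1", "cred-1", Set.of(new InstanceGroup("sg-1", "tpl-1")), cluster));
    }
}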
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The prepare method of the ClusterCreationSetupService class:
public Cluster prepare(ClusterRequest request, Stack stack, Blueprint blueprint, IdentityUser user) throws Exception {
    String stackName = stack.getName();
    long start = System.currentTimeMillis();
    Cluster cluster = conversionService.convert(request, Cluster.class);
    cluster.setStack(stack);
    LOGGER.info("Cluster conversion took {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    start = System.currentTimeMillis();
    cluster = clusterDecorator.decorate(cluster, request, blueprint, user, stack);
    LOGGER.info("Cluster object decorated in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    start = System.currentTimeMillis();
    List<ClusterComponent> components = new ArrayList<>();
    Set<Component> allComponent = componentConfigProvider.getAllComponentsByStackIdAndType(stack.getId(),
            Sets.newHashSet(ComponentType.AMBARI_REPO_DETAILS, ComponentType.HDP_REPO_DETAILS, ComponentType.IMAGE));
    Optional<Component> stackAmbariRepoConfig = allComponent.stream()
            .filter(c -> c.getComponentType().equals(ComponentType.AMBARI_REPO_DETAILS) && c.getName().equalsIgnoreCase(ComponentType.AMBARI_REPO_DETAILS.name()))
            .findAny();
    Optional<Component> stackHdpRepoConfig = allComponent.stream()
            .filter(c -> c.getComponentType().equals(ComponentType.HDP_REPO_DETAILS) && c.getName().equalsIgnoreCase(ComponentType.HDP_REPO_DETAILS.name()))
            .findAny();
    Optional<Component> stackImageComponent = allComponent.stream()
            .filter(c -> c.getComponentType().equals(ComponentType.IMAGE) && c.getName().equalsIgnoreCase(ComponentType.IMAGE.name()))
            .findAny();
    ClusterComponent ambariRepoConfig = determineAmbariRepoConfig(stackAmbariRepoConfig, request.getAmbariRepoDetailsJson(), stackImageComponent, cluster);
    components.add(ambariRepoConfig);
    ClusterComponent hdpRepoConfig = determineHDPRepoConfig(blueprint, stack.getId(), stackHdpRepoConfig, request, cluster, user, stackImageComponent);
    components.add(hdpRepoConfig);
    checkVDFFile(ambariRepoConfig, hdpRepoConfig, stackName);
    LOGGER.info("Cluster components saved in {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    start = System.currentTimeMillis();
    Cluster savedCluster = clusterService.create(user, stack, cluster, components);
    LOGGER.info("Cluster object creation took {} ms for stack {}", System.currentTimeMillis() - start, stackName);
    return savedCluster;
}
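The component lookup above fetches every relevant component of the stack in one call and then picks each kind out with a stream filter and an Optional. The sketch below shows that selection idiom in isolation; ComponentType and Component here are simplified stand-ins, not the Cloudbreak domain classes.

import java.util.List;
import java.util.Optional;

public class ComponentSelectionSketch {

    enum ComponentType { AMBARI_REPO_DETAILS, HDP_REPO_DETAILS, IMAGE }

    record Component(ComponentType type, String name) { }

    public static void main(String[] args) {
        List<Component> allComponents = List.of(
                new Component(ComponentType.AMBARI_REPO_DETAILS, "AMBARI_REPO_DETAILS"),
                new Component(ComponentType.HDP_REPO_DETAILS, "HDP_REPO_DETAILS"),
                new Component(ComponentType.IMAGE, "IMAGE"));

        Optional<Component> ambariRepo = findByType(allComponents, ComponentType.AMBARI_REPO_DETAILS);
        Optional<Component> hdpRepo = findByType(allComponents, ComponentType.HDP_REPO_DETAILS);
        Optional<Component> image = findByType(allComponents, ComponentType.IMAGE);

        System.out.println(ambariRepo + " / " + hdpRepo + " / " + image);
    }

    static Optional<Component> findByType(List<Component> components, ComponentType type) {
        // Matches both the component type and the conventional name for that type,
        // mirroring the double check in the prepare method above.
        return components.stream()
                .filter(c -> c.type() == type && c.name().equalsIgnoreCase(type.name()))
                .findAny();
    }
}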
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The updateStatus method of the AmbariClusterService class:
@Override
@Transactional(TxType.NEVER)
public void updateStatus(Long stackId, StatusRequest statusRequest) {
    Stack stack = stackService.getByIdWithLists(stackId);
    Cluster cluster = stack.getCluster();
    if (cluster == null) {
        throw new BadRequestException(String.format("There is no cluster installed on stack '%s'.", stack.getName()));
    }
    switch (statusRequest) {
        case SYNC:
            sync(stack);
            break;
        case STOPPED:
            stop(stack, cluster);
            break;
        case STARTED:
            start(stack, cluster);
            break;
        default:
            throw new BadRequestException("Cannot update the status of cluster because status request not valid");
    }
}
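The dispatch is a plain switch over the StatusRequest enum with a rejecting default branch. A minimal stand-alone sketch of that shape follows; the enum values beyond the three handled cases and the exception type are hypothetical.

public class StatusDispatchSketch {

    enum StatusRequest { SYNC, STOPPED, STARTED, FULL_SYNC }

    static String updateStatus(StatusRequest statusRequest) {
        switch (statusRequest) {
            case SYNC:
                return "sync";
            case STOPPED:
                return "stop";
            case STARTED:
                return "start";
            default:
                // Any request that the cluster flow does not support is rejected outright.
                throw new IllegalArgumentException("Cannot update the status of the cluster: invalid status request " + statusRequest);
        }
    }

    public static void main(String[] args) {
        System.out.println(updateStatus(StatusRequest.STARTED)); // prints "start"
    }
}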