Use of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by hortonworks.
The class RecipeEngine, method addFsRecipesToHostGroups:
private void addFsRecipesToHostGroups(Credential credential, Iterable<HostGroup> hostGroups, String blueprintText, FileSystem fs) throws IOException {
    String scriptName = fs.getType().toLowerCase();
    FileSystemConfigurator fsConfigurator = fileSystemConfigurators.get(FileSystemType.valueOf(fs.getType()));
    FileSystemConfiguration fsConfiguration = getFileSystemConfiguration(fs);
    List<RecipeScript> recipeScripts = fsConfigurator.getScripts(credential, fsConfiguration);
    List<Recipe> fsRecipes = recipeBuilder.buildRecipes(scriptName, recipeScripts);
    for (int i = 0; i < fsRecipes.size(); i++) {
        RecipeScript recipeScript = recipeScripts.get(i);
        Recipe recipe = fsRecipes.get(i);
        for (HostGroup hostGroup : hostGroups) {
            if (ExecutionType.ALL_NODES == recipeScript.getExecutionType()) {
                hostGroup.addRecipe(recipe);
            } else if (ExecutionType.ONE_NODE == recipeScript.getExecutionType() && isComponentPresent(blueprintText, "NAMENODE", hostGroup)) {
                hostGroup.addRecipe(recipe);
                break;
            }
        }
    }
}
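The ONE_NODE branch depends on an isComponentPresent helper that is not shown in this excerpt. As a hedged sketch only, assuming the standard Ambari blueprint JSON layout (host_groups entries with a name and a components array) and plain Jackson parsing; the real helper in RecipeEngine may differ:

// Hypothetical sketch, not the project's actual implementation: returns true when the
// named component (e.g. "NAMENODE") appears in the blueprint entry for this host group.
private boolean isComponentPresent(String blueprintText, String component, HostGroup hostGroup) throws IOException {
    JsonNode hostGroupsNode = new ObjectMapper().readTree(blueprintText).path("host_groups");
    for (JsonNode hostGroupNode : hostGroupsNode) {
        if (hostGroup.getName().equals(hostGroupNode.path("name").asText())) {
            for (JsonNode componentNode : hostGroupNode.path("components")) {
                if (component.equals(componentNode.path("name").asText())) {
                    return true;
                }
            }
        }
    }
    return false;
}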
Use of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by hortonworks.
The class AmbariDecommissioner, method collectDownscaleCandidates:
private Iterable<HostMetadata> collectDownscaleCandidates(Stack stack, Cluster cluster, String hostGroupName, Integer scalingAdjustment) {
    List<HostMetadata> downScaleCandidates;
    HttpClientConfig clientConfig = tlsSecurityService.buildTLSClientConfigForPrimaryGateway(stack.getId(), cluster.getAmbariIp());
    HostGroup hostGroup = hostGroupService.getByClusterIdAndName(cluster.getId(), hostGroupName);
    Set<HostMetadata> hostsInHostGroup = hostGroup.getHostMetadata();
    List<HostMetadata> filteredHostList = hostFilterService.filterHostsForDecommission(cluster, hostsInHostGroup, hostGroupName);
    int reservedInstances = hostsInHostGroup.size() - filteredHostList.size();
    String blueprintName = cluster.getBlueprint().getAmbariName();
    AmbariClient ambariClient = ambariClientProvider.getAmbariClient(clientConfig, stack.getGatewayPort(), cluster);
    if (ambariClient.getBlueprintMap(blueprintName).get(hostGroupName).contains(DATANODE)) {
        int replication = getReplicationFactor(ambariClient, hostGroupName);
        verifyNodeCount(replication, scalingAdjustment, filteredHostList.size(), reservedInstances);
        downScaleCandidates = checkAndSortByAvailableSpace(stack, ambariClient, replication, scalingAdjustment, filteredHostList);
    } else {
        verifyNodeCount(NO_REPLICATION, scalingAdjustment, filteredHostList.size(), reservedInstances);
        downScaleCandidates = filteredHostList;
    }
    return downScaleCandidates;
}
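The verifyNodeCount call is the guard that prevents decommissioning below the HDFS replication factor. A minimal sketch of such a guard, with the exception type and exact arithmetic assumed rather than taken from AmbariDecommissioner:

// Hedged sketch only: fail the request if the downscale would leave fewer usable
// hosts in the group than the replication factor requires.
private void verifyNodeCount(int replication, int scalingAdjustment, int hostCount, int reservedInstances) {
    int adjustment = Math.abs(scalingAdjustment);
    if (hostCount < adjustment || hostCount - adjustment < replication) {
        throw new BadRequestException(String.format(
                "Cannot downscale by %d: only %d hosts are eligible (%d reserved), replication factor is %d.",
                adjustment, hostCount, reservedInstances, replication));
    }
}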
Use of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by hortonworks.
The class TestUtil, method hostGroups:
public static Set<HostGroup> hostGroups(Cluster cluster) {
    Set<HostGroup> hostGroups = new HashSet<>();
    HostGroup hg = new HostGroup();
    hg.setCluster(cluster);
    hg.setId(1L);
    hg.setName("slave_1");
    hostGroups.add(hg);
    return hostGroups;
}
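One possible way to consume this fixture in a unit test; the cluster(blueprint(), stack(), 1L) call mirrors the usage shown in the next example, and the assertions are illustrative rather than taken from the project's tests:

// Illustrative test usage of TestUtil.hostGroups(...)
Cluster cluster = TestUtil.cluster(TestUtil.blueprint(), TestUtil.stack(), 1L);
Set<HostGroup> hostGroups = TestUtil.hostGroups(cluster);
assertEquals(1, hostGroups.size());
assertEquals("slave_1", hostGroups.iterator().next().getName());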
Use of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by hortonworks.
The class TestUtil, method hostGroup:
public static HostGroup hostGroup(String name, int count) {
    HostGroup hostGroup = new HostGroup();
    hostGroup.setId(1L);
    hostGroup.setName(name);
    hostGroup.setRecipes(recipes(1));
    hostGroup.setHostMetadata(hostMetadata(hostGroup, count));
    InstanceGroup instanceGroup = instanceGroup(1L, name, InstanceGroupType.CORE, gcpTemplate(1L), count);
    Constraint constraint = new Constraint();
    constraint.setInstanceGroup(instanceGroup);
    constraint.setHostCount(instanceGroup.getNodeCount());
    hostGroup.setConstraint(constraint);
    hostGroup.setCluster(cluster(blueprint(), stack(), 1L));
    hostGroup.setRecoveryMode(RecoveryMode.MANUAL);
    return hostGroup;
}
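A hedged example of how such a fixture could back a mocked lookup; the Mockito stubbing is illustrative and not taken from the project's tests, while getByClusterIdAndName matches the service call seen in the decommissioner and flow-chain examples:

// Illustrative stubbing only: return the fixture whenever cluster 1's "slave_1" group is requested.
HostGroup slaveGroup = TestUtil.hostGroup("slave_1", 3);
when(hostGroupService.getByClusterIdAndName(1L, "slave_1")).thenReturn(slaveGroup);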
Use of com.sequenceiq.cloudbreak.domain.HostGroup in project cloudbreak by hortonworks.
The class DownscaleFlowEventChainFactory, method createFlowTriggerEventQueue:
@Override
public Queue<Selectable> createFlowTriggerEventQueue(ClusterAndStackDownscaleTriggerEvent event) {
    Queue<Selectable> flowEventChain = new ConcurrentLinkedQueue<>();
    ClusterScaleTriggerEvent cste = event.getHostNames() == null
            ? new ClusterDownscaleTriggerEvent(DECOMMISSION_EVENT.event(), event.getStackId(), event.getHostGroupName(), event.getAdjustment(), event.accepted())
            : new ClusterDownscaleTriggerEvent(DECOMMISSION_EVENT.event(), event.getStackId(), event.getHostGroupName(), event.getHostNames(), event.accepted());
    flowEventChain.add(cste);
    if (event.getScalingType() == ScalingType.DOWNSCALE_TOGETHER) {
        StackView stackView = stackService.getByIdView(event.getStackId());
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stackView.getClusterView().getId(), event.getHostGroupName());
        Constraint hostGroupConstraint = hostGroup.getConstraint();
        String instanceGroupName = Optional.ofNullable(hostGroupConstraint.getInstanceGroup()).map(InstanceGroup::getGroupName).orElse(null);
        StackScaleTriggerEvent sste = event.getHostNames() == null
                ? new StackDownscaleTriggerEvent(STACK_DOWNSCALE_EVENT.event(), event.getStackId(), instanceGroupName, event.getAdjustment())
                : new StackDownscaleTriggerEvent(STACK_DOWNSCALE_EVENT.event(), event.getStackId(), instanceGroupName, event.getHostNames());
        flowEventChain.add(sste);
    }
    return flowEventChain;
}
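The queue is consumed in insertion order, so the cluster downscale event always precedes the optional stack downscale event. A sketch of how a caller might drain it, where dispatch(...) stands in for whatever flow engine actually consumes the events (an assumption, not project code):

// Illustrative consumption of the event chain produced above.
Queue<Selectable> chain = downscaleFlowEventChainFactory.createFlowTriggerEventQueue(event);
while (!chain.isEmpty()) {
    Selectable flowEvent = chain.poll();
    dispatch(flowEvent); // hypothetical dispatch hook into the flow engine
}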