Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.
The class RecipeTemplateService, method isGeneratedRecipesInDbStale.
/**
 * Compares the generated recipes stored in the database against the on-the-fly generated recipes for every host group.
 * If any of them differs from its source (in the database), or no recipe relation is found for a generated recipe, the result is false.
 */
public boolean isGeneratedRecipesInDbStale(Set<HostGroup> hostGroups, Map<HostGroup, List<RecipeModel>> generatedModels) {
    for (HostGroup hostGroup : hostGroups) {
        Set<GeneratedRecipe> generatedRecipes = hostGroup.getGeneratedRecipes();
        boolean hasRecipes = CollectionUtils.isNotEmpty(hostGroup.getRecipes());
        boolean hasGeneratedRecipes = CollectionUtils.isNotEmpty(generatedRecipes);
        boolean recipeModelsContainsHostGroup = MapUtils.isNotEmpty(generatedModels) && generatedModels.containsKey(hostGroup);
        if (hasRecipes && !hasGeneratedRecipes) {
            LOGGER.debug("No generated recipes found for host group '{}', but it has recipes. Recipes should be uploaded and regenerated.", hostGroup.getName());
            return false;
        } else if (!hasGeneratedRecipes) {
            LOGGER.debug("No source and generated recipes found for host group '{}', skip comparing source and generated recipes.", hostGroup.getName());
            continue;
        } else if (!recipeModelsContainsHostGroup) {
            LOGGER.debug("Generated recipe models do not contain host group {}. Recipes should be regenerated.", hostGroup.getName());
            return false;
        }
        boolean anyWithoutRecipeSource = generatedRecipes.stream().anyMatch(g -> g.getRecipe() == null);
        if (anyWithoutRecipeSource) {
            LOGGER.debug("Not found recipe source for generated recipe. Recipes should be uploaded and regenerated.");
            return false;
        }
        Map<String, GeneratedRecipe> generatedRecipeNameMap = generatedRecipes.stream()
                .collect(Collectors.toMap(g -> g.getRecipe().getName(), g -> g, (g1, g2) -> g1));
        List<RecipeModel> recipeModelList = generatedModels.get(hostGroup);
        if (generatedRecipes.size() != recipeModelList.size()) {
            LOGGER.debug("Source and generated recipe counts are not matching for host group '{}'. Recipes should be uploaded and regenerated.", hostGroup.getName());
            return false;
        }
        if (isRecipeUpToDateInHostGroup(hostGroup, generatedRecipeNameMap, recipeModelList)) {
            LOGGER.debug("Recipes matches for host group '{}'", hostGroup.getName());
        } else {
            return false;
        }
    }
    return true;
}
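To make the staleness check above concrete, the following is a minimal JUnit 5/Mockito sketch (not taken from the Cloudbreak test suite) of its first branch: a host group that already has recipes attached but no generated recipes yet, which the method reports by returning false. It assumes RecipeTemplateService can be instantiated directly, since this code path touches no injected collaborators; project-specific imports for HostGroup, Recipe and RecipeTemplateService are omitted.

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;
import java.util.Set;

import org.junit.jupiter.api.Test;

class RecipeTemplateServiceStalenessSketchTest {

    @Test
    void reportsStaleWhenRecipesExistButNothingIsGeneratedYet() {
        // Mocked entity: the host group has a recipe attached but no generated recipes in the DB.
        HostGroup hostGroup = mock(HostGroup.class);
        when(hostGroup.getName()).thenReturn("worker");
        when(hostGroup.getRecipes()).thenReturn(Set.of(mock(Recipe.class)));
        when(hostGroup.getGeneratedRecipes()).thenReturn(Set.of());

        // hasRecipes && !hasGeneratedRecipes -> the method returns false (recipes must be regenerated).
        boolean result = new RecipeTemplateService()
                .isGeneratedRecipesInDbStale(Set.of(hostGroup), Map.of());

        assertFalse(result);
    }
}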
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.
The class UpdateRecipeService, method refreshRecipesForCluster.
/**
 * Updates recipes for an existing cluster. The input should contain a host group - recipes mapping;
 * any host group missing from the mapping is not updated. Recipes can be attached or detached
 * (or both - that is the default). The output is the set of recipes newly attached/detached in the database.
 */
public UpdateRecipesV4Response refreshRecipesForCluster(Long workspaceId, Stack stack, List<UpdateHostGroupRecipes> recipesPerHostGroup) {
    Set<String> recipesToFind = recipesPerHostGroup.stream()
            .flatMap(rphg -> rphg.getRecipeNames().stream())
            .collect(Collectors.toSet());
    Map<String, Set<String>> recipesToUpdate = recipesPerHostGroup.stream()
            .collect(Collectors.toMap(UpdateHostGroupRecipes::getHostGroupName, UpdateHostGroupRecipes::getRecipeNames, (n1, n2) -> n1));
    LOGGER.debug("Update recipes {}", recipesToUpdate);
    Set<Recipe> recipes = recipeService.getByNamesForWorkspaceId(recipesToFind, workspaceId);
    validate(recipesToFind, recipes);
    Set<HostGroup> hostGroups = hostGroupService.getByClusterWithRecipes(stack.getCluster().getId());
    UpdateRecipesV4Response result = updateRecipesForHostGroups(recipesToUpdate, recipes, hostGroups);
    LOGGER.debug("Update recipes result: {}", result);
    return result;
}
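The interesting part of refreshRecipesForCluster is the pair of stream transforms that turn the request into a flat lookup set and a host group to recipe names map. Below is a self-contained sketch of just those two transforms, using a local record as a hypothetical stand-in for the real UpdateHostGroupRecipes API model; the host group and recipe names are invented for illustration.

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public final class RecipeMappingSketch {

    // Hypothetical stand-in for the real UpdateHostGroupRecipes API model (not the actual class).
    record HostGroupRecipes(String hostGroupName, Set<String> recipeNames) { }

    public static void main(String[] args) {
        List<HostGroupRecipes> recipesPerHostGroup = List.of(
                new HostGroupRecipes("master", Set.of("setup-ranger", "post-install")),
                new HostGroupRecipes("worker", Set.of("post-install")));

        // Flatten every requested recipe name into a single lookup set (recipesToFind in the service).
        Set<String> recipesToFind = recipesPerHostGroup.stream()
                .flatMap(rphg -> rphg.recipeNames().stream())
                .collect(Collectors.toSet());

        // Build the host group -> recipe names mapping (recipesToUpdate in the service);
        // the merge function keeps the first entry if a host group name is repeated.
        Map<String, Set<String>> recipesToUpdate = recipesPerHostGroup.stream()
                .collect(Collectors.toMap(HostGroupRecipes::hostGroupName, HostGroupRecipes::recipeNames, (n1, n2) -> n1));

        System.out.println(recipesToFind);   // e.g. [setup-ranger, post-install]
        System.out.println(recipesToUpdate); // e.g. {master=[setup-ranger, post-install], worker=[post-install]}
    }
}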
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.
The class UpdateRecipeService, method updateRecipeForCluster.
private void updateRecipeForCluster(Long workspaceId, Stack stack, String recipeName, String hostGroupName, boolean detach) {
    Recipe recipe = recipeService.getByNameForWorkspaceId(recipeName, workspaceId);
    HostGroup hostGroup = hostGroupService.getByClusterIdAndNameWithRecipes(stack.getCluster().getId(), hostGroupName);
    if (hostGroup == null) {
        throw new NotFoundException(String.format("Host group '%s' not found for workspace", hostGroupName));
    }
    Set<Recipe> existingRecipes = hostGroup.getRecipes();
    Set<String> existingRecipeNames = existingRecipes.stream().map(Recipe::getName).collect(Collectors.toSet());
    if (detach) {
        detachRecipeFromHostGroup(recipe, hostGroup, existingRecipeNames);
    } else {
        attachRecipeToHostGroup(recipe, hostGroup, existingRecipeNames);
    }
}
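updateRecipeForCluster delegates the actual work to detachRecipeFromHostGroup and attachRecipeToHostGroup, which are not shown on this page. As an illustration only, here is a hypothetical sketch of what the attach branch could look like if it simply skips recipes that are already present; both the body of the helper and the hostGroupService.save call are assumptions, not the real UpdateRecipeService implementation.

// Hypothetical sketch, not the actual UpdateRecipeService implementation.
private void attachRecipeToHostGroup(Recipe recipe, HostGroup hostGroup, Set<String> existingRecipeNames) {
    if (existingRecipeNames.contains(recipe.getName())) {
        // Attaching is idempotent here: an already attached recipe is left untouched.
        LOGGER.debug("Recipe '{}' is already attached to host group '{}'.", recipe.getName(), hostGroup.getName());
        return;
    }
    hostGroup.getRecipes().add(recipe);
    // Assumption: the host group can be persisted through hostGroupService; the real persistence call may differ.
    hostGroupService.save(hostGroup);
}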
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.
The class UpdateRecipeService, method updateRecipesForHostGroups.
private UpdateRecipesV4Response updateRecipesForHostGroups(Map<String, Set<String>> recipesToUpdate, Set<Recipe> recipes, Set<HostGroup> hostGroups) {
    UpdateRecipesV4Response result = new UpdateRecipesV4Response();
    for (HostGroup hostGroup : hostGroups) {
        UpdateHostGroupRecipesPair updatePairs = doHostGroupRecipeUpdate(recipesToUpdate, recipes, hostGroup);
        updatePairs.getRecipesToAttach().ifPresent(a -> result.getRecipesAttached().add(a));
        updatePairs.getRecipesToDetach().ifPresent(d -> result.getRecipesDetached().add(d));
    }
    return result;
}
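The accumulation in updateRecipesForHostGroups relies on UpdateHostGroupRecipesPair exposing both sides as Optionals, so only host groups with actual changes end up in the response. The following self-contained sketch reproduces that pattern with a local record standing in for the real pair type; the names and sample values are invented for illustration.

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

public final class OptionalAccumulationSketch {

    // Hypothetical stand-in for UpdateHostGroupRecipesPair: either side may be empty
    // when there is nothing to attach or detach for a host group.
    record UpdatePair(Optional<String> recipesToAttach, Optional<String> recipesToDetach) { }

    public static void main(String[] args) {
        List<UpdatePair> pairs = List.of(
                new UpdatePair(Optional.of("master: [setup-ranger]"), Optional.empty()),
                new UpdatePair(Optional.empty(), Optional.of("worker: [post-install]")));

        List<String> attached = new ArrayList<>();
        List<String> detached = new ArrayList<>();
        // Same pattern as in updateRecipesForHostGroups: only non-empty sides are added to the response.
        for (UpdatePair pair : pairs) {
            pair.recipesToAttach().ifPresent(attached::add);
            pair.recipesToDetach().ifPresent(detached::add);
        }
        System.out.println(attached); // [master: [setup-ranger]]
        System.out.println(detached); // [worker: [post-install]]
    }
}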
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.host.HostGroup in project cloudbreak by hortonworks.
The class ClouderaManagerDecomissioner, method collectHostsToRemove.
public Map<String, InstanceMetaData> collectHostsToRemove(Stack stack, HostGroup hostGroup, Set<String> hostNames, ApiClient client) {
    Set<InstanceMetaData> hostsInHostGroup = hostGroup.getInstanceGroup().getNotTerminatedInstanceMetaDataSet();
    Map<String, InstanceMetaData> hostsToRemove = hostsInHostGroup.stream()
            .filter(hostMetadata -> hostNames.contains(hostMetadata.getDiscoveryFQDN()))
            .collect(Collectors.toMap(InstanceMetaData::getDiscoveryFQDN, hostMetadata -> hostMetadata));
    if (hostsToRemove.size() != hostNames.size()) {
        List<String> missingHosts = hostNames.stream().filter(h -> !hostsToRemove.containsKey(h)).collect(Collectors.toList());
        LOGGER.debug("Not all requested hosts found in CB for host group: {}. MissingCount={}, missingHosts=[{}]. Requested hosts: [{}]",
                hostGroup.getName(), missingHosts.size(), missingHosts, hostNames);
    }
    HostsResourceApi hostsResourceApi = clouderaManagerApiFactory.getHostsResourceApi(client);
    try {
        ApiHostList hostRefList = hostsResourceApi.readHosts(null, null, SUMMARY_REQUEST_VIEW);
        List<String> runningHosts = hostRefList.getItems().stream().map(ApiHost::getHostname).collect(Collectors.toList());
        // TODO: what if i remove a node from CM manually?
        List<String> matchingCmHosts = hostsToRemove.keySet().stream().filter(hostName -> runningHosts.contains(hostName)).collect(Collectors.toList());
        Set<String> matchingCmHostSet = new HashSet<>(matchingCmHosts);
        if (matchingCmHosts.size() != hostsToRemove.size()) {
            List<String> missingHostsInCm = hostsToRemove.keySet().stream().filter(h -> !matchingCmHostSet.contains(h)).collect(Collectors.toList());
            LOGGER.debug("Not all requested hosts found in CM. MissingCount={}, missingHosts=[{}]. Requested hosts: [{}]",
                    missingHostsInCm.size(), missingHostsInCm, hostsToRemove.keySet());
        }
        Sets.newHashSet(hostsToRemove.keySet()).stream().filter(hostName -> !matchingCmHostSet.contains(hostName)).forEach(hostsToRemove::remove);
        LOGGER.debug("Collected hosts to remove: [{}]", hostsToRemove);
        return hostsToRemove;
    } catch (ApiException e) {
        LOGGER.error("Failed to get host list for cluster: {}", stack.getName(), e);
        throw new CloudbreakServiceException(e.getMessage(), e);
    }
}
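collectHostsToRemove filters the requested host names twice: first against the instance metadata Cloudbreak tracks for the host group, then against the hosts Cloudera Manager still reports as present. The sketch below reproduces that two-stage filtering with plain collections and made-up FQDNs standing in for InstanceMetaData and the CM API response.

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public final class HostFilteringSketch {

    public static void main(String[] args) {
        // Hypothetical data: hosts requested for removal, hosts known to Cloudbreak, hosts running in CM.
        Set<String> requested = Set.of("worker1.example.com", "worker2.example.com", "worker3.example.com");
        Set<String> knownToCloudbreak = Set.of("worker1.example.com", "worker2.example.com");
        List<String> runningInCm = List.of("worker1.example.com");

        // Stage 1: keep only the requested hosts that Cloudbreak has metadata for
        // (stand-in for the FQDN -> InstanceMetaData map built in collectHostsToRemove).
        Map<String, String> hostsToRemove = knownToCloudbreak.stream()
                .filter(requested::contains)
                .collect(Collectors.toMap(fqdn -> fqdn, fqdn -> "metadata-of-" + fqdn, (a, b) -> a, HashMap::new));

        // Stage 2: drop hosts that Cloudera Manager no longer reports as present,
        // mirroring the removal loop over matchingCmHostSet (the key set is copied before removing).
        Set<String> cmHosts = new HashSet<>(runningInCm);
        new HashSet<>(hostsToRemove.keySet()).stream()
                .filter(fqdn -> !cmHosts.contains(fqdn))
                .forEach(hostsToRemove::remove);

        System.out.println(hostsToRemove.keySet()); // [worker1.example.com]
    }
}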