Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks.
The class MeteringAzureMetadataPatchService, method upgradeMeteringOnNodes.
private boolean upgradeMeteringOnNodes(Stack stack) throws ExistingStackPatchApplyException, IOException, CloudbreakOrchestratorFailedException {
    byte[] currentSaltState = getCurrentSaltStateStack(stack);
    List<String> saltStateDefinitions = Arrays.asList("salt-common", "salt");
    List<String> meteringSaltStateDef = List.of("/salt/metering");
    byte[] meteringSaltStateConfig = compressUtil.generateCompressedOutputFromFolders(saltStateDefinitions, meteringSaltStateDef);
    // Compare the metering part of the stack's current salt state with the freshly generated definition.
    boolean meteringContentMatches = compressUtil.compareCompressedContent(currentSaltState, meteringSaltStateConfig, meteringSaltStateDef);
    if (!meteringContentMatches) {
        Set<InstanceMetaData> instanceMetaDataSet = instanceMetaDataService.findNotTerminatedAndNotZombieForStack(stack.getId());
        List<GatewayConfig> gatewayConfigs = gatewayConfigService.getAllGatewayConfigs(stack);
        ClusterDeletionBasedExitCriteriaModel exitModel = ClusterDeletionBasedExitCriteriaModel.nonCancellableModel();
        // Push the updated metering salt definition to the gateways before running the upgrade on the nodes.
        getTelemetryOrchestrator().updateMeteringSaltDefinition(meteringSaltStateConfig, gatewayConfigs, exitModel);
        Set<Node> availableNodes = getAvailableNodes(instanceMetaDataSet, gatewayConfigs, exitModel);
        if (CollectionUtils.isEmpty(availableNodes)) {
            LOGGER.info("Not found any available nodes for patch, stack: " + stack.getName());
            return false;
        } else {
            getTelemetryOrchestrator().upgradeMetering(gatewayConfigs, availableNodes, exitModel,
                    meteringAzureMetadataPatchConfig.getDateBefore(), meteringAzureMetadataPatchConfig.getCustomRpmUrl());
            // Persist the merged salt state so later operations see the refreshed metering definition.
            byte[] newFullSaltState = compressUtil.updateCompressedOutputFolders(saltStateDefinitions, meteringSaltStateDef, currentSaltState);
            clusterBootstrapper.updateSaltComponent(stack, newFullSaltState);
            LOGGER.debug("Metering partial salt refresh successfully finished for stack {}", stack.getName());
            return true;
        }
    } else {
        LOGGER.debug("Metering partial salt refresh is not required for stack {}", stack.getName());
        return true;
    }
}
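As an illustration of the guard above, here is a minimal, self-contained sketch of the "refresh only when the content differs" check. It compares digests instead of calling compressUtil; PartialRefreshGuard, digestOf and the byte[] inputs are hypothetical stand-ins, not Cloudbreak API.

import java.security.MessageDigest;
import java.util.Arrays;

// Hypothetical sketch: decide whether a partial refresh is needed by comparing
// a digest of the current state bundle with a digest of the desired one.
public class PartialRefreshGuard {

    static boolean refreshRequired(byte[] currentState, byte[] desiredState) throws Exception {
        // A digest mismatch means the deployed definitions are stale and need a refresh.
        return !Arrays.equals(digestOf(currentState), digestOf(desiredState));
    }

    static byte[] digestOf(byte[] content) throws Exception {
        return MessageDigest.getInstance("SHA-256").digest(content);
    }

    public static void main(String[] args) throws Exception {
        byte[] current = "metering-v1".getBytes();
        byte[] desired = "metering-v2".getBytes();
        System.out.println("Refresh required: " + refreshRequired(current, desired));
    }
}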
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks.
The class ClusterUpgradeAvailabilityService, method checkForUpgradesByName.
public UpgradeV4Response checkForUpgradesByName(Stack stack, boolean lockComponents, boolean replaceVms, InternalUpgradeSettings internalUpgradeSettings) {
    UpgradeV4Response upgradeOptions = checkForUpgrades(stack, lockComponents, internalUpgradeSettings);
    upgradeOptions.setReplaceVms(replaceVms);
    // Only look for blocking conditions if the upgrade check itself did not already set a reason.
    if (StringUtils.isEmpty(upgradeOptions.getReason())) {
        if (!stack.getStatus().isAvailable()) {
            upgradeOptions.setReason(String.format("Cannot upgrade cluster because it is in %s state.", stack.getStatus()));
            LOGGER.warn(upgradeOptions.getReason());
        } else if (instanceMetaDataService.anyInstanceStopped(stack.getId())) {
            upgradeOptions.setReason("Cannot upgrade cluster because there is stopped instance.");
            LOGGER.warn(upgradeOptions.getReason());
        } else if (shouldValidateForRepair(lockComponents, replaceVms)) {
            LOGGER.debug("Validate for repair");
            // The repair dry run is the most expensive validation, so it only runs once the cheaper checks pass.
            Result<Map<HostGroupName, Set<InstanceMetaData>>, RepairValidation> validationResult = clusterRepairService.repairWithDryRun(stack.getId());
            if (validationResult.isError()) {
                upgradeOptions.setReason(String.join(",", validationResult.getError().getValidationErrors()));
                LOGGER.warn(String.format("Cannot upgrade cluster because: %s", upgradeOptions.getReason()));
            }
        }
    }
    return upgradeOptions;
}
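A minimal sketch of the layered precondition pattern above: cheaper checks fill the single reason first, and the expensive repair dry run is only reached when they pass. ClusterState and blockingReason are hypothetical stand-ins for the Stack status, instanceMetaDataService and clusterRepairService calls.

import java.util.List;
import java.util.Optional;

// Hypothetical sketch: chain of upgrade preconditions that returns the first blocking reason.
public class UpgradePreconditions {

    record ClusterState(boolean available, boolean anyInstanceStopped, List<String> repairErrors) { }

    static Optional<String> blockingReason(ClusterState state) {
        if (!state.available()) {
            return Optional.of("Cannot upgrade cluster because it is not in an available state.");
        }
        if (state.anyInstanceStopped()) {
            return Optional.of("Cannot upgrade cluster because there is a stopped instance.");
        }
        // The most expensive validation (the repair dry run in the real service) comes last.
        if (!state.repairErrors().isEmpty()) {
            return Optional.of(String.join(",", state.repairErrors()));
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        ClusterState stopped = new ClusterState(true, true, List.of());
        System.out.println(blockingReason(stopped).orElse("Upgrade candidates can be offered."));
    }
}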
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks.
The class ClouderaManagerModificationService, method createUpscaledHostRefList.
private ApiHostRefList createUpscaledHostRefList(Map<String, InstanceMetaData> upscaleInstancesMap, Map<String, ApiHost> upscaleHostsMap) {
    LOGGER.debug("Creating ApiHostRefList from upscaled hosts.");
    ApiHostRefList body = new ApiHostRefList();
    // Pair each host reported by Cloudera Manager with the matching upscaled instance metadata;
    // hostnames without a matching instance are skipped.
    upscaleHostsMap.forEach((hostname, host) -> Optional.ofNullable(upscaleInstancesMap.get(hostname))
            .ifPresent(instance -> {
                ApiHostRef apiHostRef = new ApiHostRef().hostname(instance.getDiscoveryFQDN()).hostId(host.getHostId());
                body.addItemsItem(apiHostRef);
            }));
    if (body.getItems() != null) {
        LOGGER.debug("Created ApiHostRefList from upscaled hosts. Host count: [{}]", body.getItems().size());
    } else {
        LOGGER.debug("Created ApiHostRefList is empty.");
    }
    return body;
}
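A minimal sketch of the hostname join performed above: for every host reported by Cloudera Manager, look up the matching instance and emit a (FQDN, host id) pair only when both sides are present. HostRef is a hypothetical stand-in for ApiHostRef, and plain string maps replace the metadata objects.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;

// Hypothetical sketch: join two maps keyed by hostname and keep only entries present in both.
public class UpscaledHostJoin {

    record HostRef(String fqdn, String hostId) { }

    static List<HostRef> join(Map<String, String> instanceFqdnsByHostname, Map<String, String> hostIdsByHostname) {
        List<HostRef> refs = new ArrayList<>();
        hostIdsByHostname.forEach((hostname, hostId) ->
                Optional.ofNullable(instanceFqdnsByHostname.get(hostname))
                        .ifPresent(fqdn -> refs.add(new HostRef(fqdn, hostId))));
        return refs;
    }

    public static void main(String[] args) {
        Map<String, String> instances = Map.of("worker1.example.com", "worker1.example.com");
        Map<String, String> cmHosts = Map.of("worker1.example.com", "host-id-1", "unknown.example.com", "host-id-2");
        System.out.println(join(instances, cmHosts)); // only worker1 is emitted
    }
}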
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks.
The class ClouderaManagerDecomissioner, method deleteHostFromClouderaManager.
private void deleteHostFromClouderaManager(Stack stack, InstanceMetaData data, ApiClient client) {
    HostsResourceApi hostsResourceApi = clouderaManagerApiFactory.getHostsResourceApi(client);
    try {
        ApiHostList hostRefList = hostsResourceApi.readHosts(null, null, SUMMARY_REQUEST_VIEW);
        // Match the instance by its discovery FQDN, guarding against a null FQDN.
        Optional<ApiHost> hostRefOptional = hostRefList.getItems().stream()
                .filter(host -> data.getDiscoveryFQDN() != null && data.getDiscoveryFQDN().equals(host.getHostname()))
                .findFirst();
        if (hostRefOptional.isPresent()) {
            ApiHost hostRef = hostRefOptional.get();
            ClustersResourceApi clustersResourceApi = clouderaManagerApiFactory.getClustersResourceApi(client);
            // Detach the host from the cluster first, then delete it from Cloudera Manager entirely.
            clustersResourceApi.removeHost(stack.getName(), hostRef.getHostId());
            hostsResourceApi.deleteHost(hostRef.getHostId());
            LOGGER.debug("Host remove request sent. Host id: [{}]", hostRef.getHostId());
        } else {
            LOGGER.debug("Host already deleted.");
        }
    } catch (ApiException e) {
        LOGGER.error("Failed to delete host: {}", data.getDiscoveryFQDN(), e);
        throw new CloudbreakServiceException(e.getMessage(), e);
    }
}
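A minimal sketch of the null-safe FQDN lookup above: the discovery FQDN can be null, so the filter guards against that before comparing with the hostname reported by Cloudera Manager, and a missing host is treated as already deleted rather than an error. Host is a hypothetical stand-in for ApiHost.

import java.util.List;
import java.util.Optional;

// Hypothetical sketch: find a host by FQDN without tripping over a null FQDN.
public class HostLookup {

    record Host(String hostname, String hostId) { }

    static Optional<Host> findByFqdn(List<Host> cmHosts, String discoveryFqdn) {
        return cmHosts.stream()
                .filter(host -> discoveryFqdn != null && discoveryFqdn.equals(host.hostname()))
                .findFirst();
    }

    public static void main(String[] args) {
        List<Host> hosts = List.of(new Host("worker1.example.com", "host-id-1"));
        // An absent host (or a null FQDN) yields an empty Optional, i.e. "already deleted".
        System.out.println(findByFqdn(hosts, "gone.example.com").isEmpty());
        System.out.println(findByFqdn(hosts, null).isEmpty());
    }
}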
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceMetaData in project cloudbreak by hortonworks.
The class ClouderaManagerDecomissioner, method collectHostsToRemove.
public Map<String, InstanceMetaData> collectHostsToRemove(Stack stack, HostGroup hostGroup, Set<String> hostNames, ApiClient client) {
    Set<InstanceMetaData> hostsInHostGroup = hostGroup.getInstanceGroup().getNotTerminatedInstanceMetaDataSet();
    // Keep only the host group's non-terminated instances whose discovery FQDN was requested.
    Map<String, InstanceMetaData> hostsToRemove = hostsInHostGroup.stream()
            .filter(hostMetadata -> hostNames.contains(hostMetadata.getDiscoveryFQDN()))
            .collect(Collectors.toMap(InstanceMetaData::getDiscoveryFQDN, hostMetadata -> hostMetadata));
    if (hostsToRemove.size() != hostNames.size()) {
        List<String> missingHosts = hostNames.stream().filter(h -> !hostsToRemove.containsKey(h)).collect(Collectors.toList());
        LOGGER.debug("Not all requested hosts found in CB for host group: {}. MissingCount={}, missingHosts=[{}]. Requested hosts: [{}]",
                hostGroup.getName(), missingHosts.size(), missingHosts, hostNames);
    }
    HostsResourceApi hostsResourceApi = clouderaManagerApiFactory.getHostsResourceApi(client);
    try {
        ApiHostList hostRefList = hostsResourceApi.readHosts(null, null, SUMMARY_REQUEST_VIEW);
        List<String> runningHosts = hostRefList.getItems().stream().map(ApiHost::getHostname).collect(Collectors.toList());
        // TODO: what if i remove a node from CM manually?
        List<String> matchingCmHosts = hostsToRemove.keySet().stream().filter(hostName -> runningHosts.contains(hostName)).collect(Collectors.toList());
        Set<String> matchingCmHostSet = new HashSet<>(matchingCmHosts);
        if (matchingCmHosts.size() != hostsToRemove.size()) {
            List<String> missingHostsInCm = hostsToRemove.keySet().stream().filter(h -> !matchingCmHostSet.contains(h)).collect(Collectors.toList());
            LOGGER.debug("Not all requested hosts found in CM. MissingCount={}, missingHosts=[{}]. Requested hosts: [{}]",
                    missingHostsInCm.size(), missingHostsInCm, hostsToRemove.keySet());
        }
        // Drop hosts that Cloudera Manager no longer reports, so the removal only targets hosts that still exist.
        Sets.newHashSet(hostsToRemove.keySet()).stream().filter(hostName -> !matchingCmHostSet.contains(hostName)).forEach(hostsToRemove::remove);
        LOGGER.debug("Collected hosts to remove: [{}]", hostsToRemove);
        return hostsToRemove;
    } catch (ApiException e) {
        LOGGER.error("Failed to get host list for cluster: {}", stack.getName(), e);
        throw new CloudbreakServiceException(e.getMessage(), e);
    }
}
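A minimal sketch of the two-stage narrowing above: requested host names are first matched against what Cloudbreak knows, then against the hosts Cloudera Manager actually reports, and anything missing on either side is logged and dropped. The string maps and System.out logging are hypothetical stand-ins for the InstanceMetaData map keyed by discovery FQDN and the LOGGER calls.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical sketch: intersect the requested host names with two registries (CB and CM).
public class HostsToRemoveCollector {

    static Map<String, String> collect(Set<String> requested, Map<String, String> cbHostsByFqdn, Set<String> cmHosts) {
        // Stage 1: keep only hosts Cloudbreak knows about.
        Map<String, String> toRemove = cbHostsByFqdn.entrySet().stream()
                .filter(e -> requested.contains(e.getKey()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a, b) -> a, HashMap::new));
        List<String> missingInCb = requested.stream().filter(h -> !toRemove.containsKey(h)).collect(Collectors.toList());
        System.out.println("Missing in CB: " + missingInCb);
        // Stage 2: drop hosts Cloudera Manager no longer reports.
        List<String> missingInCm = toRemove.keySet().stream().filter(h -> !cmHosts.contains(h)).collect(Collectors.toList());
        System.out.println("Missing in CM: " + missingInCm);
        toRemove.keySet().removeAll(missingInCm);
        return toRemove;
    }

    public static void main(String[] args) {
        Map<String, String> cb = Map.of("w1", "meta-w1", "w2", "meta-w2");
        System.out.println(collect(Set.of("w1", "w2", "w3"), cb, Set.of("w1"))); // only w1 survives both stages
    }
}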