Usage example of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus.FAILED in the Cloudbreak project by Hortonworks.
From the class ClouderaManagerDecomissioner, method collectDownscaleCandidates:
/**
 * Selects the instances to remove from the given host group during a downscale.
 * Prefers instances that are already unused; any remaining quota is filled with the
 * least healthy Cloudera Manager hosts (per {@code hostHealthComparator}).
 *
 * @param client                    Cloudera Manager API client
 * @param stack                     the stack (cluster) being scaled
 * @param hostGroup                 host group to remove nodes from
 * @param scalingAdjustment         negative (or absolute) number of nodes to remove
 * @param instanceMetaDatasInStack  all instance metadata of the stack
 * @return the instances chosen for removal
 * @throws CloudbreakServiceException if the Cloudera Manager API calls fail
 */
public Set<InstanceMetaData> collectDownscaleCandidates(ApiClient client, Stack stack, HostGroup hostGroup, Integer scalingAdjustment, Set<InstanceMetaData> instanceMetaDatasInStack) {
    LOGGER.debug("Collecting downscale candidates");
    // Only instances belonging to the host group being scaled are eligible.
    Set<InstanceMetaData> instancesForHostGroup = instanceMetaDatasInStack.stream()
            .filter(instanceMetaData -> instanceMetaData.getInstanceGroup().getGroupName().equals(hostGroup.getName()))
            .collect(Collectors.toSet());
    HostsResourceApi hostsResourceApi = clouderaManagerApiFactory.getHostsResourceApi(client);
    try {
        HostTemplatesResourceApi hostTemplatesResourceApi = clouderaManagerApiFactory.getHostTemplatesResourceApi(client);
        ApiHostTemplateList hostTemplates = hostTemplatesResourceApi.readHostTemplates(stack.getName());
        // HDFS replication factor only matters when the group hosts datanodes; otherwise 0 disables the check.
        int replication = hostGroupNodesAreDataNodes(hostTemplates, hostGroup.getName()) ? getReplicationFactor(client, stack.getName()) : 0;
        verifyNodeCount(replication, scalingAdjustment, instancesForHostGroup.size(), 0, stack);
        ApiHostList hostRefList = hostsResourceApi.readHosts(null, null, SUMMARY_REQUEST_VIEW);
        // Instances with no CM-side workload are removed first, for free.
        Set<InstanceMetaData> instancesToRemove = getUnusedInstances(scalingAdjustment, instancesForHostGroup, hostRefList);
        List<ApiHost> apiHosts = hostRefList.getItems().stream()
                .filter(host -> instancesForHostGroup.stream()
                        .filter(instanceMetaData -> instanceMetaData.getDiscoveryFQDN() != null)
                        .anyMatch(instanceMetaData -> instanceMetaData.getDiscoveryFQDN().equals(host.getHostname())))
                .collect(Collectors.toList());
        // Clamp to zero: if the unused instances already cover the whole adjustment,
        // Stream.limit must not receive a negative argument (it would throw IllegalArgumentException).
        long remainingToSelect = Math.max(0L, (long) Math.abs(scalingAdjustment) - instancesToRemove.size());
        Set<String> hostsToRemove = apiHosts.stream()
                .sorted(hostHealthComparator)
                .limit(remainingToSelect)
                .map(ApiHost::getHostname)
                .collect(Collectors.toSet());
        Set<InstanceMetaData> clouderaManagerNodesToRemove = instancesForHostGroup.stream()
                .filter(instanceMetaData -> hostsToRemove.contains(instanceMetaData.getDiscoveryFQDN()))
                .collect(Collectors.toSet());
        instancesToRemove.addAll(clouderaManagerNodesToRemove);
        LOGGER.debug("Downscale candidates: [{}]", instancesToRemove);
        return instancesToRemove;
    } catch (ApiException e) {
        LOGGER.error("Failed to get host list for cluster: {}", stack.getName(), e);
        throw new CloudbreakServiceException(e.getMessage(), e);
    }
}
Usage example of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus.FAILED in the Cloudbreak project by Hortonworks.
From the class ClouderaManagerDecomissioner, method decommissionNodes:
/**
 * Decommissions the given hosts through Cloudera Manager and polls until the
 * command finishes. On a polling timeout the command is aborted and retried once,
 * but only when every affected node is already lost on the cloud provider.
 *
 * @param stack         the stack (cluster) owning the hosts
 * @param hostsToRemove hostname -> instance metadata of the hosts to decommission; must not be empty
 * @param client        Cloudera Manager API client
 * @return the FQDNs of the hosts that were actually submitted for decommission
 * @throws CloudbreakServiceException on API failure, decommission timeout, or empty input
 * @throws CancellationException if the cluster is terminated while polling
 */
public Set<String> decommissionNodes(Stack stack, Map<String, InstanceMetaData> hostsToRemove, ApiClient client) {
    HostsResourceApi hostsResourceApi = clouderaManagerApiFactory.getHostsResourceApi(client);
    try {
        ApiHostList hostRefList = hostsResourceApi.readHosts(null, null, SUMMARY_REQUEST_VIEW);
        // Only hosts CM still knows about can be decommissioned; others were already removed.
        List<String> stillAvailableRemovableHosts = hostRefList.getItems().stream()
                .filter(apiHostRef -> hostsToRemove.containsKey(apiHostRef.getHostname()))
                .map(ApiHost::getHostname)
                .collect(Collectors.toList());
        LOGGER.debug("Decommissioning nodes: [{}]", stillAvailableRemovableHosts);
        boolean onlyLostNodesAffected = hostsToRemove.values().stream().allMatch(InstanceMetaData::isDeletedOnProvider);
        // Fail fast with a clear message instead of a NoSuchElementException from Optional.get()
        // when the caller passes an empty map.
        String hostGroupName = hostsToRemove.values().stream()
                .map(instanceMetaData -> instanceMetaData.getInstanceGroup().getGroupName())
                .findFirst()
                .orElseThrow(() -> new CloudbreakServiceException("No instance metadata provided for host decommission"));
        ClouderaManagerResourceApi apiInstance = clouderaManagerApiFactory.getClouderaManagerResourceApi(client);
        ApiHostNameList body = new ApiHostNameList().items(stillAvailableRemovableHosts);
        ApiCommand apiCommand = apiInstance.hostsDecommissionCommand(body);
        ExtendedPollingResult pollingResult = clouderaManagerPollingServiceProvider.startPollingCmHostDecommissioning(
                stack, client, apiCommand.getId(), onlyLostNodesAffected, stillAvailableRemovableHosts.size());
        if (pollingResult.isExited()) {
            throw new CancellationException("Cluster was terminated while waiting for host decommission");
        } else if (pollingResult.isTimeout()) {
            if (onlyLostNodesAffected) {
                // Lost nodes can hang the command indefinitely; abort and retry once.
                String warningMessage = "Cloudera Manager decommission host command {} polling timed out, "
                        + "thus we are aborting the decommission and we are retrying it for lost nodes once again.";
                abortDecommissionWithWarningMessage(apiCommand, client, warningMessage);
                retryDecommissionNodes(apiInstance, body, stack, client, stillAvailableRemovableHosts, hostGroupName);
            } else {
                throw new CloudbreakServiceException("Timeout while Cloudera Manager decommissioned host.");
            }
        }
        return stillAvailableRemovableHosts.stream()
                .map(hostsToRemove::get)
                .filter(instanceMetaData -> instanceMetaData.getDiscoveryFQDN() != null)
                .map(InstanceMetaData::getDiscoveryFQDN)
                .collect(Collectors.toSet());
    } catch (ApiException e) {
        LOGGER.error("Failed to decommission hosts: {}", hostsToRemove.keySet(), e);
        throw new CloudbreakServiceException(e.getMessage(), e);
    }
}
Usage example of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus.FAILED in the Cloudbreak project by Hortonworks.
From the class CoreImageProvider, method getImages:
/**
 * Fetches the FreeIPA images from the image catalog for the given platform and
 * wraps each convertible entry. This is a best-effort lookup: every failure is
 * logged and mapped to an empty list rather than propagated.
 *
 * @param imageSettings image settings carrying the catalog name
 * @param region        region (currently not forwarded to the catalog endpoint)
 * @param platform      cloud platform to filter images for
 * @return the wrapped images, or an empty list on any failure
 */
@Override
public List<ImageWrapper> getImages(ImageSettingsRequest imageSettings, String region, String platform) {
    try {
        ImagesV4Response catalogResponse =
                imageCatalogV4Endpoint.getImagesByName(WORKSPACE_ID_DEFAULT, imageSettings.getCatalog(), null, platform, null, null, false);
        LOGGER.debug("Images received: {}", catalogResponse);
        // The endpoint may return null instead of an empty image list.
        if (catalogResponse.getFreeipaImages() == null) {
            return List.of();
        }
        return catalogResponse.getFreeipaImages().stream()
                .map(this::convert)
                .flatMap(Optional::stream)
                .map(convertedImage -> new ImageWrapper(convertedImage, null, imageSettings.getCatalog()))
                .collect(Collectors.toList());
    } catch (WebApplicationException e) {
        // HTTP-level failures carry a response body worth extracting for the log.
        LOGGER.warn("Fetching images failed with: {}", messageExtractor.getErrorMessage(e), e);
        return List.of();
    } catch (Exception e) {
        LOGGER.warn("Fetching images failed", e);
        return List.of();
    }
}
Usage example of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus.FAILED in the Cloudbreak project by Hortonworks.
From the class CloudbreakUtil, method waitForStatuses:
/**
 * Polls the stack status endpoint until every desired status is reached, a FAILED
 * status appears, or {@code MAX_RETRY} attempts are exhausted.
 *
 * @param cloudbreakClient client used to query the stack status
 * @param workspaceId      workspace of the stack
 * @param stackName        name of the stack to poll
 * @param desiredStatuses  status-path -> desired status value
 * @param accountId        account owning the stack
 * @return SUCCESSFUL, FAILED (a FAILED status or unexpected delete was observed), or TIMEOUT
 */
private static WaitResult waitForStatuses(CloudbreakClient cloudbreakClient, Long workspaceId, String stackName, Map<String, String> desiredStatuses, String accountId) {
    WaitResult waitResult = WaitResult.SUCCESSFUL;
    Map<String, String> currentStatuses = new HashMap<>();
    int retryCount = 0;
    do {
        LOGGER.info("Waiting for status(es) {}, stack id: {}, current status(es) {} ...", desiredStatuses, stackName, currentStatuses);
        sleep();
        // Count every attempt, including ones where the status call throws; otherwise a
        // persistently failing endpoint would never hit MAX_RETRY and the loop would spin forever.
        retryCount++;
        StackV4Endpoint stackV4Endpoint = cloudbreakClient.stackV4Endpoint();
        try {
            StackStatusV4Response statusResult = stackV4Endpoint.getStatusByName(workspaceId, stackName, accountId);
            for (String statusPath : desiredStatuses.keySet()) {
                currentStatuses.put(statusPath, statusResult.getStatus().name());
            }
        } catch (RuntimeException e) {
            // Transient endpoint failures are tolerated; keep polling with the previous statuses.
            LOGGER.warn("Failed to poll status of stack {}, retrying", stackName, e);
            continue;
        }
    } while (!checkStatuses(currentStatuses, desiredStatuses) && !checkFailedStatuses(currentStatuses) && retryCount < MAX_RETRY);
    LOGGER.info("Status(es) {} for {} are in desired status(es) {}", desiredStatuses.keySet(), stackName, currentStatuses.values());
    if (currentStatuses.values().stream().anyMatch(cs -> cs.contains("FAILED")) || checkNotExpectedDelete(currentStatuses, desiredStatuses)) {
        waitResult = WaitResult.FAILED;
    }
    if (retryCount == MAX_RETRY) {
        waitResult = WaitResult.TIMEOUT;
    }
    return waitResult;
}
Usage example of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.base.InstanceStatus.FAILED in the Cloudbreak project by Hortonworks.
From the class DatabaseConfigServiceTest, method testExistingConnectionFails:
/**
 * Verifies that testing the connection of an existing database config surfaces the
 * validation error message produced by the connection validator.
 */
@Test
public void testExistingConnectionFails() {
    // Given: a database config already registered for the environment
    DatabaseConfig existingDatabaseConfig = new DatabaseConfig();
    when(repository.findByEnvironmentIdAndName(ENVIRONMENT_CRN, DATABASE_NAME)).thenReturn(Optional.of(existingDatabaseConfig));
    // And: a validator that reports a failure through its Errors argument
    doAnswer((Answer) invocation -> {
        MapBindingResult bindingResult = invocation.getArgument(1, MapBindingResult.class);
        bindingResult.addError(new ObjectError("failed", ERROR_MESSAGE));
        return null;
    }).when(connectionValidator).validate(any(), any());

    // When: the connection test is executed
    String connectionResult = underTest.testConnection(DATABASE_NAME, ENVIRONMENT_CRN);

    // Then: the validation error is returned and the existing config was validated
    assertEquals(ERROR_MESSAGE, connectionResult);
    verify(connectionValidator).validate(eq(existingDatabaseConfig), any());
}
Aggregations