Use of com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status in project cloudbreak by hortonworks.
The class StackDownscaleValidatorServiceTest, method testCheckClusterInValidStatusWhenInStoppedStatus:
@Test
public void testCheckClusterInValidStatusWhenInStoppedStatus() {
    Stack stack = new Stack();
    stack.setStackStatus(new StackStatus(stack, DetailedStackStatus.STOPPED));
    expectedException.expect(BadRequestException.class);
    expectedException.expectMessage("Cluster is in Stopped status. Please start the cluster for downscale.");
    underTest.checkClusterInValidStatus(stack);
}
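For context, a minimal sketch of the method under test; the guard shape and the isStopped() helper are assumptions here, not the verbatim cloudbreak implementation. Only the exception type and message are taken from the test above:

// Hypothetical sketch: assumes Stack exposes an isStopped() convenience over
// the underlying Status enum; the real validator may check further states.
public void checkClusterInValidStatus(Stack stack) {
    if (stack.isStopped()) {
        throw new BadRequestException("Cluster is in Stopped status. Please start the cluster for downscale.");
    }
}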
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status in project cloudbreak by hortonworks.
The class StackStatusCheckerJob, method switchToShortSyncIfNecessary:
private void switchToShortSyncIfNecessary(JobExecutionContext context) {
    if (isLongSyncJob(context)) {
        Stack stack = stackService.get(getStackId());
        Status stackStatus = stack.getStatus();
        if (!longSyncableStates().contains(stackStatus)) {
            jobService.schedule(getStackId(), StackJobAdapter.class);
        }
    }
}
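The decision hinges on longSyncableStates() returning the set of statuses for which a long sync interval may stay in place; any other status drops the stack back to the regular short sync via StackJobAdapter. A hedged sketch of what such a set could look like, with illustrative members rather than the actual cloudbreak definition (Set and EnumSet are java.util):

// Illustrative only: a long sync interval is typically acceptable for stacks
// parked in inactive states; the real member list lives in StackStatusCheckerJob.
private Set<Status> longSyncableStates() {
    return EnumSet.of(Status.STOPPED, Status.DELETED_ON_PROVIDER_SIDE);
}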
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status in project cloudbreak by hortonworks.
The class ArchiveInstanceMetaDataJob, method executeTracedJob:
@Override
protected void executeTracedJob(JobExecutionContext context) throws JobExecutionException {
    StackView stackView = stackViewService.findById(getStackId()).orElseGet(StackView::new);
    Status stackStatus = stackView.getStatus();
    if (!Status.getUnschedulableStatuses().contains(stackStatus)) {
        archiveInstanceMetaDataOnStack(stackView);
    } else {
        LOGGER.debug("Existing stack InstanceMetaData archiving will be descheduled, because stack {} state is {}",
                stackView.getResourceCrn(), stackStatus);
        jobService.unschedule(context.getJobDetail().getKey());
    }
}
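Here Status.getUnschedulableStatuses() acts as the guard: once a stack reaches one of those states, the Quartz job unschedules itself instead of archiving again. As a hedged illustration, such a helper commonly collects the terminal delete states; the members below are assumptions, and the authoritative list is defined on the Status enum itself:

// Illustrative sketch of a getUnschedulableStatuses()-style helper; the
// members shown are assumptions, not the authoritative cloudbreak list.
public static Set<Status> getUnschedulableStatuses() {
    return EnumSet.of(DELETE_IN_PROGRESS, DELETE_COMPLETED, DELETE_FAILED);
}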
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status in project cloudbreak by hortonworks.
The class MetadataSetupService, method saveLoadBalancerMetadata:
public void saveLoadBalancerMetadata(Stack stack, Iterable<CloudLoadBalancerMetadata> cloudLoadBalancerMetadataList) {
    try {
        LOGGER.info("Save load balancer metadata for stack: {}", stack.getName());
        Set<LoadBalancer> allLoadBalancerMetadata = loadBalancerPersistenceService.findByStackId(stack.getId());
        for (CloudLoadBalancerMetadata cloudLoadBalancerMetadata : cloudLoadBalancerMetadataList) {
            LoadBalancer loadBalancerEntry = createLoadBalancerMetadataIfAbsent(allLoadBalancerMetadata, stack, cloudLoadBalancerMetadata.getType());
            loadBalancerEntry.setDns(cloudLoadBalancerMetadata.getCloudDns());
            loadBalancerEntry.setHostedZoneId(cloudLoadBalancerMetadata.getHostedZoneId());
            loadBalancerEntry.setIp(cloudLoadBalancerMetadata.getIp());
            loadBalancerEntry.setType(cloudLoadBalancerMetadata.getType());
            String endpoint = loadBalancerConfigService.generateLoadBalancerEndpoint(stack);
            List<StackIdView> byEnvironmentCrnAndStackType = stackService.getByEnvironmentCrnAndStackType(stack.getEnvironmentCrn(), StackType.DATALAKE);
            List<StackStatus> stoppedDatalakes = byEnvironmentCrnAndStackType.stream()
                    .map(s -> stackStatusService.findFirstByStackIdOrderByCreatedDesc(s.getId()))
                    .filter(Optional::isPresent)
                    .map(Optional::get)
                    .filter(status -> status.getStatus().isStopState())
                    .collect(Collectors.toList());
            if (!stoppedDatalakes.isEmpty()) {
                /* Check for the situation where we are resizing a datalake that did not previously
                 * have load balancers, so that we can reuse the same endpoint name for a seamless
                 * transition. */
LOGGER.info("Using old datalake endpoint name for resized datalake: {}, env: {}", stack.getName(), stack.getEnvironmentCrn());
if (stoppedDatalakes.size() > 1) {
String ids = stoppedDatalakes.stream().map(stackStatus -> stackStatus.getStack().getId()).map(Object::toString).collect(Collectors.joining(","));
LOGGER.warn("more than one datalake found to resize from: {}", ids);
}
Long oldId = stoppedDatalakes.get(0).getStack().getId();
Set<LoadBalancer> oldLoadbalancers = loadBalancerPersistenceService.findByStackId(oldId);
if (oldLoadbalancers.isEmpty()) {
Stack oldStack = stackService.getByIdWithGatewayInTransaction(oldId);
if (stack.getDisplayName().equals(oldStack.getDisplayName())) {
endpoint = oldStack.getPrimaryGatewayInstance().getShortHostname();
}
}
}
LOGGER.info("Saving load balancer endpoint as: {}", endpoint);
loadBalancerEntry.setEndpoint(endpoint);
loadBalancerEntry.setProviderConfig(loadBalancerConfigConverter.convertLoadBalancer(stack.getCloudPlatform(), cloudLoadBalancerMetadata));
loadBalancerPersistenceService.save(loadBalancerEntry);
Set<TargetGroup> targetGroups = targetGroupPersistenceService.findByLoadBalancerId(loadBalancerEntry.getId());
for (TargetGroup targetGroup : targetGroups) {
targetGroup.setProviderConfig(loadBalancerConfigConverter.convertTargetGroup(stack.getCloudPlatform(), cloudLoadBalancerMetadata, targetGroup));
targetGroupPersistenceService.save(targetGroup);
}
}
} catch (Exception ex) {
throw new CloudbreakServiceException("Load balancer metadata collection failed", ex);
}
}
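The resize detection above turns on status.getStatus().isStopState(), a predicate on the Status enum rather than an explicit equality check. A minimal sketch of such a predicate, assuming it simply tests membership in the stop-related states (the exact member list is an assumption; the real definition belongs to the Status enum):

// Hedged sketch of an isStopState()-style predicate on the Status enum;
// the member list shown here is assumed, not verbatim.
public boolean isStopState() {
    return STOP_REQUESTED.equals(this) || STOP_IN_PROGRESS.equals(this) || STOPPED.equals(this);
}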
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.common.Status in project cloudbreak by hortonworks.
The class RecoveryTeardownService, method handleRecoveryTeardownError:
public void handleRecoveryTeardownError(StackView stack, Exception errorDetails) {
    Long stackId = stack.getId();
    String stackUpdateMessage = "Recovery failed: " + errorDetails.getMessage();
    DetailedStackStatus status = DetailedStackStatus.CLUSTER_RECOVERY_FAILED;
    stackUpdater.updateStackStatus(stackId, status, stackUpdateMessage);
    LOGGER.info("Error during stack recovery flow: ", errorDetails);
    metricService.incrementMetricCounter(MetricType.STACK_RECOVERY_TEARDOWN_FAILED, stack, errorDetails);
    flowMessageService.fireEventAndLog(stackId, status.name(), DATALAKE_RECOVERY_FAILED, stackUpdateMessage);
}
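A typical call site would be a flow error handler that catches the teardown failure and delegates here; the fragment below is only illustrative (terminateInstances and recoveryTeardownService are assumed names, not the verbatim cloudbreak flow code):

// Hypothetical caller: on any teardown failure, record CLUSTER_RECOVERY_FAILED
// via the service shown above. Names are illustrative assumptions.
try {
    terminateInstances(stack);
} catch (Exception e) {
    recoveryTeardownService.handleRecoveryTeardownError(stack, e);
}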