use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
the class StackOperations method updateNameAndCrn.
public void updateNameAndCrn(@NotNull NameOrCrn nameOrCrn, Long workspaceId, String newName, String newCrn) {
    Stack stack = stackService.getByNameOrCrnInWorkspace(nameOrCrn, workspaceId);
    stackUpdateService.updateNameAndCrn(stack, newName, newCrn);
}
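A minimal caller sketch, assuming NameOrCrn exposes an ofCrn factory and that stackOperations is an injected StackOperations instance; the wrapping method is illustrative only:

// Hypothetical caller: rename a stack that is identified by its CRN.
public void renameStack(String crn, Long workspaceId, String newName, String newCrn) {
    stackOperations.updateNameAndCrn(NameOrCrn.ofCrn(crn), workspaceId, newName, newCrn);
}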
use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
the class StackOperationService method start.
@VisibleForTesting
FlowIdentifier start(Stack stack, Cluster cluster, boolean updateCluster) {
    FlowIdentifier flowIdentifier = FlowIdentifier.notTriggered();
    environmentService.checkEnvironmentStatus(stack, EnvironmentStatus.startable());
    dataLakeStatusCheckerService.validateRunningState(stack);
    if (stack.isAvailable()) {
        // The stack is already running, so only an informational event is fired.
        eventService.fireCloudbreakEvent(stack.getId(), AVAILABLE.name(), STACK_START_IGNORED);
    } else if (stack.isReadyForStart() || stack.isStartFailed()) {
        // Mark the stack as START_REQUESTED and trigger the start flow.
        Stack startStack = stackUpdater.updateStackStatus(stack.getId(), DetailedStackStatus.START_REQUESTED);
        flowIdentifier = flowManager.triggerStackStart(stack.getId());
        if (updateCluster && cluster != null) {
            clusterOperationService.updateStatus(startStack, StatusRequest.STARTED);
        }
    } else {
        // Any other status is not a valid starting point.
        throw NotAllowedStatusUpdate.stack(stack).to(DetailedStackStatus.START_REQUESTED).badRequest();
    }
    return flowIdentifier;
}
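A minimal caller sketch, assuming the service is injected as stackOperationService and that the cluster should be started together with the stack; the returned FlowIdentifier can then be used to follow the flow:

// Hypothetical caller: trigger the start flow for a stack and its cluster.
FlowIdentifier flowIdentifier = stackOperationService.start(stack, stack.getCluster(), true);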
use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
the class ParcelSizeServiceTest method testGetAllParcelSizeShouldThrowExceptionWhenTheParcelSizeIsZero.
@Test(expected = CloudbreakException.class)
public void testGetAllParcelSizeShouldThrowExceptionWhenTheParcelSizeIsZero() throws CloudbreakException {
    Stack stack = new Stack();
    Set<String> parcelUrls = createParcelUrls();
    when(parcelUrlProvider.getRequiredParcelsFromImage(targetImage, stack)).thenReturn(parcelUrls);
    when(request.head()).thenReturn(response);
    when(response.getHeaderString("Content-Length")).thenReturn("0");
    underTest.getRequiredFreeSpace(targetImage, stack);
    verify(restClientFactory).getOrCreateDefault();
    verify(parcelUrlProvider).getRequiredParcelsFromImage(targetImage, stack);
    verify(paywallCredentialPopulator).populateWebTarget(any(), eq(webTarget));
}
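A hedged companion sketch for the non-zero case, assuming the same class-level mocks (parcelUrlProvider, request, response, underTest) are in place and that getRequiredFreeSpace returns the computed space as a long; the return type and the assertion are assumptions, not the project's actual test:

// Hypothetical test: a non-zero Content-Length should yield a positive required space instead of throwing.
@Test
public void testGetAllParcelSizeWhenTheParcelSizeIsPositive() throws CloudbreakException {
    Stack stack = new Stack();
    when(parcelUrlProvider.getRequiredParcelsFromImage(targetImage, stack)).thenReturn(createParcelUrls());
    when(request.head()).thenReturn(response);
    when(response.getHeaderString("Content-Length")).thenReturn("2048");
    long requiredFreeSpace = underTest.getRequiredFreeSpace(targetImage, stack);
    assertTrue(requiredFreeSpace > 0);
}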
use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
the class ClusterUpgradeHandler method getRemoteDataContext.
private Optional<String> getRemoteDataContext(Stack stack) {
    Optional<String> remoteDataContext = Optional.empty();
    if (!stack.isDatalake() && StringUtils.isNotEmpty(stack.getDatalakeCrn())) {
        Stack datalake = stackService.getByCrn(stack.getDatalakeCrn());
        LOGGER.info("Fetch the Remote Data Context from {} to update the Data Hub", stack.getName());
        ClusterApi datalakeConnector = clusterApiConnectors.getConnector(datalake);
        remoteDataContext = Optional.of(datalakeConnector.getSdxContext());
    }
    return remoteDataContext;
}
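A minimal usage sketch: only a Data Hub stack that references a Data Lake CRN yields a context, while a Data Lake stack itself gets Optional.empty(); the workloadStack variable and the debug logging are illustrative only:

// Hypothetical consumer of the optional Remote Data Context.
Optional<String> sdxContext = getRemoteDataContext(workloadStack);
sdxContext.ifPresent(context -> LOGGER.debug("Received Remote Data Context for stack {}", workloadStack.getName()));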
use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
the class RecipeEngineTest method testUploadUpscaleRecipesWithoutRecipe.
@Test
public void testUploadUpscaleRecipesWithoutRecipe() throws CloudbreakException {
    // GIVEN
    HostGroup hostGroup = new HostGroup();
    hostGroup.setName("worker");
    // WHEN
    recipeEngine.uploadUpscaleRecipes(stack(), Set.of(hostGroup), hostGroups());
    // THEN
    verify(orchestratorRecipeExecutor, times(0)).uploadRecipes(any(Stack.class), anyMap());
    verify(recipeTemplateService, times(0)).updateAllGeneratedRecipes(anySet(), anyMap());
}
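A hedged companion sketch for the positive case, assuming HostGroup exposes a setRecipes method and that a recipe(...) fixture helper exists in the test class (both are assumptions about the fixture, not the project's actual test):

// Hypothetical test: with a recipe attached to the host group, the upload should run once.
@Test
public void testUploadUpscaleRecipesWithRecipe() throws CloudbreakException {
    // GIVEN
    HostGroup hostGroup = new HostGroup();
    hostGroup.setName("worker");
    hostGroup.setRecipes(Set.of(recipe("pre-recipe")));
    // WHEN
    recipeEngine.uploadUpscaleRecipes(stack(), Set.of(hostGroup), hostGroups());
    // THEN
    verify(orchestratorRecipeExecutor).uploadRecipes(any(Stack.class), anyMap());
}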