Use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
The class ClusterV4RequestToClusterConverterTest, method testConvertClouderaManagerRequestWithNullRepo.
@Test
public void testConvertClouderaManagerRequestWithNullRepo() {
    // Request for a CDH cluster whose CM section lists products but no repository.
    ClusterV4Request request = new ClusterV4Request();
    request.setBlueprintName(BLUEPRINT);
    blueprint.setStackType(StackType.CDH.name());
    when(blueprintService.getByNameForWorkspaceAndLoadDefaultsIfNecessary(eq(BLUEPRINT), any())).thenReturn(blueprint);

    ClouderaManagerProductV4Request cdpProduct = new ClouderaManagerProductV4Request();
    cdpProduct.setName("cdp");
    cdpProduct.setParcel("cdp.parcel");
    cdpProduct.setVersion("cdp.version");
    cdpProduct.setCsd(List.of("cdp.csd"));

    ClouderaManagerProductV4Request cdfProduct = new ClouderaManagerProductV4Request();
    cdfProduct.setName("cdf");
    cdfProduct.setParcel("cdf.parcel");
    cdfProduct.setVersion("cdf.version");
    cdfProduct.setCsd(List.of("cdf.csd"));

    ClouderaManagerV4Request cmRequest = new ClouderaManagerV4Request();
    cmRequest.setProducts(List.of(cdpProduct, cdfProduct));
    request.setCm(cmRequest);

    Cluster cluster = underTest.convert(request);

    // Every product must have been converted into a CDH_PRODUCT_DETAILS component.
    assertFalse(cluster.getComponents().isEmpty());
    assertEquals(2, cluster.getComponents().size());
    assertAll(cluster.getComponents().stream().map(component -> () -> assertEquals(ComponentType.CDH_PRODUCT_DETAILS, component.getComponentType())));

    // Exactly one component carries each product's serialized attributes.
    List<Json> cdpAttributes = cluster.getComponents().stream()
            .map(ClusterComponent::getAttributes)
            .filter(attributes -> attributes.getValue().contains("cdp"))
            .collect(Collectors.toList());
    Json expectedCdpJson = new Json(cdpProduct);
    assertAll(() -> assertEquals(1, cdpAttributes.size()), () -> assertEquals(expectedCdpJson, cdpAttributes.iterator().next()));

    List<Json> cdfAttributes = cluster.getComponents().stream()
            .map(ClusterComponent::getAttributes)
            .filter(attributes -> attributes.getValue().contains("cdf"))
            .collect(Collectors.toList());
    Json expectedCdfJson = new Json(cdfProduct);
    assertAll(() -> assertEquals(1, cdfAttributes.size()), () -> assertEquals(expectedCdfJson, cdfAttributes.iterator().next()));
}
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
The class TemplateValidatorTest, method setUp.
@BeforeEach
public void setUp() {
    // NOTE(review): MockitoAnnotations.initMocks is deprecated in Mockito 3+;
    // switch to openMocks(this) once the project's Mockito version allows it.
    MockitoAnnotations.initMocks(this);

    credential = TestUtil.awsCredential();
    user = new User();
    optionalUser = Optional.of(user);
    stack = TestUtil.stack(Status.AVAILABLE, credential);
    stack.setCluster(TestUtil.cluster());

    String region = "fake location";
    // Builder is reused: the first create() has only the SSD config, the second
    // adds the ephemeral config on top — the call order below is significant.
    VmTypeMeta.VmTypeMetaBuilder metaBuilder = VmTypeMeta.VmTypeMetaBuilder.builder()
            .withCpuAndMemory(Integer.valueOf(8), Float.valueOf(15))
            .withPrice(0.42)
            .withVolumeEncryptionSupport(true)
            .withSsdConfig(1, 17592, 1, 24);
    VmType generalVm = VmType.vmTypeWithMeta("c3.2xlarge", metaBuilder.create(), false);
    VmType ephemeralVm = VmType.vmTypeWithMeta("i3.2xlarge", metaBuilder.withEphemeralConfig(1, 17592, 1, 24).create(), false);

    Map<String, Set<VmType>> vmTypesByRegion = new HashMap<>();
    vmTypesByRegion.put(region, Set.of(generalVm, ephemeralVm));
    cloudVmTypes = new CloudVmTypes(vmTypesByRegion, new HashMap<>());
    when(cloudParameterService.getVmTypesV2(isNull(), anyString(), isNull(), any(CdpResourceType.class), any(HashMap.class))).thenReturn(cloudVmTypes);

    // Disk-name -> volume-parameter-type mapping for the AWS platform.
    Map<String, VolumeParameterType> volumeTypesByDiskName = new HashMap<>();
    volumeTypesByDiskName.put("standard", VolumeParameterType.SSD);
    volumeTypesByDiskName.put(AwsDiskType.Ephemeral.value(), VolumeParameterType.EPHEMERAL);
    Map<Platform, Map<String, VolumeParameterType>> diskMappings = new HashMap<>();
    diskMappings.put(Platform.platform("AWS"), volumeTypesByDiskName);
    platformDisks = new PlatformDisks(new HashMap<>(), new HashMap<>(), diskMappings, new HashMap<>());
    when(cloudParameterService.getDiskTypes()).thenReturn(platformDisks);

    when(locationService.location(anyString(), isNull())).thenReturn(region);
}
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
The class SshKeyService, method createSshStateParams.
/**
 * Builds the orchestrator state parameters for running the given SSH salt state
 * on every reachable node of the stack.
 *
 * @param stack           stack whose nodes receive the state run
 * @param user            OS user the SSH key material belongs to
 * @param keyPair         key pair passed through to the salt pillar parameters
 * @param authKeysComment comment appended to the authorized_keys entry
 * @param saltState       name of the salt state to execute
 * @return fully populated state parameters, including retry configuration
 */
private OrchestratorStateParams createSshStateParams(Stack stack, String user, KeyPair keyPair, String authKeysComment, String saltState) {
    Cluster cluster = stack.getCluster();
    Set<Node> nodes = stackUtil.collectReachableNodes(stack);
    OrchestratorStateParams stateParams = new OrchestratorStateParams();
    stateParams.setState(saltState);
    // Use the already-extracted cluster instead of re-fetching it from the stack.
    stateParams.setPrimaryGatewayConfig(gatewayConfigService.getGatewayConfig(stack, stack.getPrimaryGatewayInstance(), cluster.hasGateway()));
    stateParams.setTargetHostNames(nodes.stream().map(Node::getHostname).collect(Collectors.toSet()));
    stateParams.setAllNodes(nodes);
    stateParams.setExitCriteriaModel(ClusterDeletionBasedExitCriteriaModel.clusterDeletionBasedModel(stack.getId(), cluster.getId()));
    stateParams.setStateParams(createSshParams(user, keyPair, authKeysComment));
    OrchestratorStateRetryParams retryParams = new OrchestratorStateRetryParams();
    retryParams.setMaxRetry(SSH_KEY_OPERATION_RETRY_COUNT);
    stateParams.setStateRetryParams(retryParams);
    return stateParams;
}
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
The class BaseLegacyStructuredFlowEventFactory, method createStructuredNotificationEvent.
// Builds a structured notification event for the given stack: copies stack,
// cluster and blueprint identifiers into NotificationDetails and pairs them
// with OperationDetails describing the originating user/workspace.
public StructuredNotificationEvent createStructuredNotificationEvent(Stack stack, String notificationType, String message, String instanceGroupName) {
Long stackId = stack.getId();
NotificationDetails notificationDetails = new NotificationDetails();
notificationDetails.setNotificationType(notificationType);
notificationDetails.setNotification(message);
notificationDetails.setStackId(stackId);
String stackName = stack.getName();
String userName = stack.getCreator().getUserName();
String userId = stack.getCreator().getUserId();
try {
notificationDetails.setCloud(stack.cloudPlatform());
notificationDetails.setRegion(stack.getRegion());
notificationDetails.setAvailabiltyZone(stack.getAvailabilityZone());
notificationDetails.setStackName(stack.getDisplayName());
notificationDetails.setStackStatus(stack.getStatus().name());
notificationDetails.setNodeCount(stack.getNotDeletedInstanceMetaDataSet().size());
Cluster cluster = stack.getCluster();
notificationDetails.setInstanceGroup(instanceGroupName);
// Cluster may not exist yet (e.g. stack provisioning in progress).
if (cluster != null) {
notificationDetails.setClusterId(cluster.getId());
notificationDetails.setClusterName(cluster.getName());
// Cluster status mirrors the stack status here; there is no separate
// cluster-status read in this method.
notificationDetails.setClusterStatus(stack.getStatus().name());
Blueprint blueprint = cluster.getBlueprint();
if (blueprint != null) {
notificationDetails.setBlueprintId(blueprint.getId());
// NOTE(review): the blueprint *name* is populated from getStackName() —
// presumably the blueprint's associated stack name is the intended value
// here; confirm this is not meant to be blueprint.getName().
notificationDetails.setBlueprintName(blueprint.getStackName());
}
}
} catch (AccessDeniedException e) {
// Best-effort enrichment: if the caller may not read these details,
// emit the event with whatever was populated before the failure.
LOGGER.info("Access denied in structured notification event creation, user: {}, stack: {}", userName, stackId, e);
}
// WORKLOAD (or untyped) stacks are reported as Data Hub resources, everything else as Data Lake.
String resourceType = (stack.getType() == null || stack.getType().equals(StackType.WORKLOAD)) ? CloudbreakEventService.DATAHUB_RESOURCE_TYPE : CloudbreakEventService.DATALAKE_RESOURCE_TYPE;
OperationDetails operationDetails = new OperationDetails(clock.getCurrentTimeMillis(), NOTIFICATION, resourceType, stackId, stackName, nodeConfig.getInstanceUUID(), cbVersion, stack.getWorkspace().getId(), userId, userName, stack.getTenant().getName(), stack.getResourceCrn(), stack.getCreator().getUserCrn(), stack.getEnvironmentCrn(), null);
return new StructuredNotificationEvent(operationDetails, notificationDetails);
}
Use of com.sequenceiq.cloudbreak.domain.stack.cluster.Cluster in project cloudbreak by hortonworks.
The class BaseLegacyStructuredFlowEventFactory, method createStucturedFlowEvent.
/**
 * Builds a structured flow event for the given stack. When {@code detailed} is
 * true, stack/cluster/blueprint details are converted and attached; otherwise
 * only the operation and flow details are populated. An optional exception is
 * recorded as a stack trace on the event.
 */
// NOTE: the method name typo ("Stuctured") is part of the overridden interface
// contract and must not be changed here.
@Override
public StructuredFlowEvent createStucturedFlowEvent(Long stackId, FlowDetails flowDetails, Boolean detailed, Exception exception) {
    Stack stack = stackService.getByIdWithTransaction(stackId);
    // WORKLOAD (or untyped) stacks are reported as Data Hub resources, everything else as Data Lake.
    boolean dataHub = stack.getType() == null || stack.getType().equals(StackType.WORKLOAD);
    String resourceType = dataHub ? CloudbreakEventService.DATAHUB_RESOURCE_TYPE : CloudbreakEventService.DATALAKE_RESOURCE_TYPE;
    OperationDetails operationDetails = new OperationDetails(clock.getCurrentTimeMillis(), FLOW, resourceType, stackId, stack.getName(),
            nodeConfig.getId(), cbVersion, stack.getWorkspace().getId(), stack.getCreator().getUserId(), stack.getCreator().getUserName(),
            stack.getTenant().getName(), stack.getResourceCrn(), stack.getCreator().getUserCrn(), stack.getEnvironmentCrn(), null);

    StackDetails stackDetails = null;
    ClusterDetails clusterDetails = null;
    BlueprintDetails blueprintDetails = null;
    if (detailed) {
        stackDetails = stackToStackDetailsConverter.convert(stack);
        Cluster cluster = stack.getCluster();
        // Cluster (and its blueprint) may be absent on a bare stack.
        if (cluster != null) {
            clusterDetails = clusterToClusterDetailsConverter.convert(cluster);
            blueprintDetails = getIfNotNull(cluster.getBlueprint(), blueprintToBlueprintDetailsConverter::convert);
        }
    }

    StructuredFlowEvent flowEvent = new StructuredFlowEvent(operationDetails, flowDetails, stackDetails, clusterDetails, blueprintDetails);
    if (exception != null) {
        flowEvent.setException(ExceptionUtils.getStackTrace(exception));
    }
    return flowEvent;
}
Aggregations