Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup in the Cloudbreak project by Hortonworks.
Example from the class ClusterHostServiceRunner, method getRangerFqdn:
private List<String> getRangerFqdn(Cluster cluster, String primaryGatewayFqdn, List<String> rangerLocations) {
    // With multiple Ranger instances (SDX HA spreads them over several instance groups),
    // Knox should only expose the ones that live in the primary gateway's group.
    if (rangerLocations.size() > 1) {
        InstanceGroup primaryGatewayGroup = instanceGroupService.getPrimaryGatewayInstanceGroupByStackId(cluster.getStack().getId());
        String groupName = primaryGatewayGroup.getGroupName();
        return rangerLocations.stream()
                .filter(location -> location.contains(groupName))
                .collect(Collectors.toList());
    }
    // Single Ranger location: prefer the primary gateway FQDN when it hosts Ranger,
    // otherwise fall back to the sole known location.
    if (rangerLocations.contains(primaryGatewayFqdn)) {
        return asList(primaryGatewayFqdn);
    }
    return asList(rangerLocations.iterator().next());
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup in the Cloudbreak project by Hortonworks.
Example from the class GrainPropertiesService, method setCloudIdentityRoles:
private void setCloudIdentityRoles(GrainProperties propertiesForIdentityRoles, InstanceMetaData instanceMetaData) {
    // Default to the LOG identity when the instance group does not declare a cloud identity type.
    CloudIdentityType identityType = instanceMetaData.getInstanceGroup()
            .getCloudIdentityType()
            .orElse(CloudIdentityType.LOG);
    // Record the role grain for this instance, keyed by its discovery FQDN.
    Map<String, String> roleGrains = new HashMap<>();
    roleGrains.put(ROLES, identityType.roleName());
    propertiesForIdentityRoles.put(instanceMetaData.getDiscoveryFQDN(), roleGrains);
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup in the Cloudbreak project by Hortonworks.
Example from the class StackCreationService, method setInstanceStoreCount:
/**
 * Collects the instance types used by the stack, asks the cloud provider for the
 * instance storage (ephemeral volume) count of each type, and persists the counts
 * on every instance group's template. Where appropriate, also switches the
 * template's temporary storage mode to ephemeral volumes.
 *
 * @param stackContext context carrying the stack plus the cloud context/credential
 *                     needed to authenticate against the provider
 */
public void setInstanceStoreCount(StackContext stackContext) {
    Stack stack = stackContext.getStack();
    CloudConnector<Object> connector = cloudPlatformConnectors.get(stackContext.getCloudContext().getPlatformVariant());
    AuthenticatedContext ac = connector.authentication().authenticate(stackContext.getCloudContext(), stackContext.getCloudCredential());
    // Only groups with a template and a concrete instance type can be queried.
    List<String> instanceTypes = stack.getInstanceGroups().stream()
            .map(InstanceGroup::getTemplate)
            .filter(Objects::nonNull)
            .map(Template::getInstanceType)
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    InstanceStoreMetadata instanceStoreMetadata = connector.metadata().collectInstanceStorageCount(ac, instanceTypes);
    for (InstanceGroup ig : stack.getInstanceGroups()) {
        updateTemplateInstanceStorage(stack, ig, instanceStoreMetadata);
    }
}

/**
 * Applies the collected instance storage count to a single instance group's template
 * and persists it. No-op for groups without a template.
 */
private void updateTemplateInstanceStorage(Stack stack, InstanceGroup ig, InstanceStoreMetadata instanceStoreMetadata) {
    Template template = ig.getTemplate();
    if (template == null) {
        return;
    }
    // NOTE(review): assumed never null per the "NullHandled" contract of the mapper — confirm,
    // since it is compared with > 0 below (auto-unboxing).
    Integer instanceStorageCount = instanceStoreMetadata.mapInstanceTypeToInstanceStoreCountNullHandled(template.getInstanceType());
    if (ephemeralVolumeChecker.instanceGroupContainsOnlyDatabaseAndEphemeralVolumes(ig)) {
        LOGGER.debug("Instance storage was already requested. Setting temporary storage in template to: {}. Group name: {}, Template id: {}, instance type: {}",
                TemporaryStorage.EPHEMERAL_VOLUMES_ONLY.name(), ig.getGroupName(), template.getId(), template.getInstanceType());
        template.setTemporaryStorage(TemporaryStorage.EPHEMERAL_VOLUMES_ONLY);
    } else if (instanceStorageCount > 0 && stack.getType().equals(StackType.WORKLOAD)) {
        LOGGER.debug("The host group's instance type has ephemeral volumes. Setting temporary storage in template to: {}. Group name: {}, Template id: {}, instance type: {}",
                TemporaryStorage.EPHEMERAL_VOLUMES.name(), ig.getGroupName(), template.getId(), template.getInstanceType());
        template.setTemporaryStorage(TemporaryStorage.EPHEMERAL_VOLUMES);
    }
    LOGGER.debug("Setting instance storage count in template. Group name: {}, Template id: {}, instance type: {}",
            ig.getGroupName(), template.getId(), template.getInstanceType());
    template.setInstanceStorageCount(instanceStorageCount);
    templateService.savePure(template);
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup in the Cloudbreak project by Hortonworks.
Example from the class StackUpscaleActions, method finishExtendMetadata:
@Bean(name = "EXTEND_METADATA_FINISHED_STATE")
public Action<?, ?> finishExtendMetadata() {
    return new AbstractStackUpscaleAction<>(CollectMetadataResult.class) {
        @Override
        protected void doExecute(StackScalingFlowContext context, CollectMetadataResult payload, Map<Object, Object> variables) throws TransactionExecutionException {
            // Total number of nodes requested across all host groups in this upscale.
            Integer totalAdjustment = context.getHostGroupWithAdjustment().values().stream().reduce(0, Integer::sum);
            Set<String> candidateAddresses = stackUpscaleService.finishExtendMetadata(context.getStack(), totalAdjustment, payload);
            variables.put(UPSCALE_CANDIDATE_ADDRESSES, candidateAddresses);
            Set<String> hostGroupNames = context.getHostGroupWithAdjustment().keySet();
            List<InstanceGroup> scaledGroups = instanceGroupService.findByStackIdAndInstanceGroupNames(payload.getResourceId(), hostGroupNames);
            boolean gatewayScaled = scaledGroups.stream()
                    .anyMatch(group -> InstanceGroupType.GATEWAY.equals(group.getInstanceGroupType()));
            if (!gatewayScaled) {
                // No gateway involved: proceed straight to bootstrapping the new nodes.
                StackEvent bootstrapPayload = new StackEvent(context.getStack().getId());
                sendEvent(context, StackUpscaleEvent.BOOTSTRAP_NEW_NODES_EVENT.event(), bootstrapPayload);
                return;
            }
            LOGGER.info("Gateway type instance group");
            Stack stack = stackService.getByIdWithListsInTransaction(context.getStack().getId());
            InstanceMetaData primaryGateway = stack.getPrimaryGatewayInstance();
            if (primaryGateway == null) {
                throw new CloudbreakServiceException("Could not get gateway instance metadata from the cloud provider.");
            }
            DetailedEnvironmentResponse environment = environmentClientService.getByCrnAsInternal(stack.getEnvironmentCrn());
            CloudInstance gatewayInstance = metadataConverter.convert(primaryGateway, environment, stack.getStackAuthentication());
            LOGGER.info("Send GetSSHFingerprintsRequest because we need to collect SSH fingerprints");
            Selectable fingerprintRequest = new GetSSHFingerprintsRequest<GetSSHFingerprintsResult>(context.getCloudContext(), context.getCloudCredential(), gatewayInstance);
            sendEvent(context, fingerprintRequest);
        }
    };
}
Use of com.sequenceiq.cloudbreak.domain.stack.instance.InstanceGroup in the Cloudbreak project by Hortonworks.
Example from the class ValidateCloudConfigHandler, method doAccept:
/**
 * Validates the stack's cloud configuration before provisioning: the stack itself,
 * every instance group's template, multi-AZ setup, cloud parameters, data lake
 * constraints and the environment. Throws when any validation reports an error.
 *
 * @param event handler event carrying the stack id to validate
 * @return the VALIDATE_CLOUD_CONFIG_FINISHED event when validation passes
 * @throws IllegalStateException when the accumulated validation result has errors
 */
@Override
protected Selectable doAccept(HandlerEvent<ValidateCloudConfigRequest> event) {
    ValidateCloudConfigRequest data = event.getData();
    Stack stack = stackService.getByIdWithLists(data.getResourceId());
    String name = stack.getName();
    DetailedEnvironmentResponse environment = environmentClientService.getByCrn(stack.getEnvironmentCrn());
    Credential credential = credentialConverter.convert(environment.getCredential());
    CloudCredential cloudCredential = credentialToCloudCredentialConverter.convert(credential);
    ValidationResult.ValidationResultBuilder validationBuilder = ValidationResult.builder();
    stackValidator.validate(stack, validationBuilder);
    Set<InstanceGroup> instanceGroups = stack.getInstanceGroups();
    measure(() -> {
        for (InstanceGroup instanceGroup : instanceGroups) {
            LOGGER.info("Validate template for {} name with {} instanceGroup.", name, instanceGroup.toString());
            StackType type = stack.getType();
            templateValidator.validate(credential, instanceGroup, stack, fromStackType(type == null ? null : type.name()), Optional.of(stack.getCreator()), validationBuilder);
        }
    }, LOGGER, "Stack's instance templates have been validated in {} ms for stack {}", name);
    multiAzValidator.validateMultiAzForStack(stack.getPlatformVariant(), instanceGroups, validationBuilder);
    ParametersValidationRequest parametersValidationRequest = parametersValidator.validate(stack.getCloudPlatform(), cloudCredential, stack.getParameters(), stack.getWorkspace().getId());
    parametersValidator.waitResult(parametersValidationRequest, validationBuilder);
    if (!StackType.LEGACY.equals(stack.getType())) {
        dataLakeValidator.validate(stack, validationBuilder);
    }
    // Constant-first equals keeps this null-safe: getType() may be null, as the
    // explicit null handling in the template-validation loop above shows.
    environmentValidator.validate(stack, environment, StackType.WORKLOAD.equals(stack.getType()), validationBuilder);
    ValidationResult validationResult = validationBuilder.build();
    if (validationResult.getState() == ValidationResult.State.ERROR || validationResult.hasError()) {
        LOGGER.debug("Stack request has validation error(s): {}.", validationResult.getFormattedErrors());
        throw new IllegalStateException(validationResult.getFormattedErrors());
    } else {
        LOGGER.debug("Stack validation has been finished without any error.");
        return new StackEvent(CloudConfigValidationEvent.VALIDATE_CLOUD_CONFIG_FINISHED_EVENT.selector(), data.getResourceId());
    }
}
Aggregations