Use of com.vmware.photon.controller.model.resources.DiskService.DiskState in project photon-model by vmware.
The class AWSDiskService, method createDisk.
/**
* Create a volume on AWS that represents the requested disk.
*/
private void createDisk(AWSDiskContext context) {
    if (context.diskRequest.isMockRequest) {
        Volume vol = getMockVolume();
        updateDiskState(vol, context, AwsDiskStage.FINISHED);
        return;
    }
    DiskState diskState = context.disk;
    // add endpointLinks
    AdapterUtils.addToEndpointLinks(diskState, context.disk.endpointLink);
    if (diskState.capacityMBytes <= 0) {
        String message = "Disk size has to be positive";
        this.logWarning(() -> "[AWSDiskService] " + message);
        throw new IllegalArgumentException(message);
    }
    if (diskState.customProperties != null
            && diskState.customProperties.get(DEVICE_TYPE) != null
            && diskState.customProperties.get(DEVICE_TYPE)
                    .equals(AWSConstants.AWSStorageType.INSTANCE_STORE.getName())) {
        String message = "Independent Instance Store disk cannot be created.";
        this.logWarning(() -> "[AWSDiskService] " + message);
        throw new IllegalArgumentException(message);
    }
    CreateVolumeRequest req = new CreateVolumeRequest();
    String zoneId = diskState.zoneId;
    if (zoneId == null) {
        List<AvailabilityZone> availabilityZoneList = context.client.getAvailabilityZones();
        if (availabilityZoneList.isEmpty()) {
            String message = String.format("No zones are available in the region %s:",
                    diskState.regionId);
            this.logSevere(() -> "[AWSDiskService] " + message);
            throw new IllegalArgumentException(message);
        }
        zoneId = availabilityZoneList.get(0).getZoneName();
    }
    // set availability zone
    req.withAvailabilityZone(zoneId);
    // set volume size
    int diskSize = (int) diskState.capacityMBytes / 1024;
    req.withSize(diskSize);
    // set encrypted field
    Boolean encrypted = diskState.encrypted == null ? false : diskState.encrypted;
    req.withEncrypted(encrypted);
    AWSUtils.setEbsDefaultsIfNotSet(diskState, Boolean.TRUE);
    validateSizeSupportedByVolumeType(diskSize, diskState.customProperties.get(VOLUME_TYPE));
    // set volume type
    if (diskState.customProperties.containsKey(VOLUME_TYPE)) {
        req.withVolumeType(diskState.customProperties.get(VOLUME_TYPE));
    }
    // set iops
    String diskIops = diskState.customProperties.get(DISK_IOPS);
    if (diskIops != null && !diskIops.isEmpty()) {
        int iops = Integer.parseInt(diskIops);
        if (iops > diskSize * MAX_IOPS_PER_GiB) {
            String info = String.format("[AWSDiskService] Requested IOPS (%s) exceeds"
                    + " the maximum value supported by %sGiB disk. Continues "
                    + "provisioning the disk with %s iops", iops, diskSize,
                    diskSize * MAX_IOPS_PER_GiB);
            this.logInfo(() -> info);
            iops = diskSize * MAX_IOPS_PER_GiB;
        }
        req.withIops(iops);
    }
    AsyncHandler<CreateVolumeRequest, CreateVolumeResult> creationHandler =
            new AWSDiskCreationHandler(this, context);
    context.client.createVolume(req, creationHandler);
}
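The method above rejects non-positive sizes and standalone instance-store disks, falls back to the first availability zone when none is set, and caps the requested IOPS at MAX_IOPS_PER_GiB per GiB. Below is a minimal caller-side sketch of a DiskState that satisfies those checks, assuming the same VOLUME_TYPE and DISK_IOPS constants as above; the concrete values and the helper name are illustrative only, not taken from photon-model tests.

private DiskState buildSampleEbsDiskState() {
    DiskState disk = new DiskState();
    // must be positive, otherwise createDisk throws IllegalArgumentException
    disk.capacityMBytes = 32 * 1024; // 32 GiB
    // optional: when null, the first availability zone reported for the region is used
    disk.zoneId = "us-east-1a";
    // a null encrypted flag is treated as false
    disk.encrypted = Boolean.TRUE;
    disk.customProperties = new HashMap<>();
    disk.customProperties.put(VOLUME_TYPE, "io1");
    // parsed as an int and capped at diskSizeGiB * MAX_IOPS_PER_GiB
    disk.customProperties.put(DISK_IOPS, "1000");
    return disk;
}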
Use of com.vmware.photon.controller.model.resources.DiskService.DiskState in project photon-model by vmware.
The class AWSInstanceService, method createInstance.
private void createInstance(AWSInstanceContext aws) {
    if (aws.computeRequest.isMockRequest) {
        aws.taskManager.finishTask();
        return;
    }
    final DiskState bootDisk = aws.bootDisk;
    if (bootDisk == null) {
        aws.taskManager.patchTaskToFailure(
                new IllegalStateException("AWS bootDisk not specified"));
        return;
    }
    if (bootDisk.bootConfig != null && bootDisk.bootConfig.files.length > 1) {
        aws.taskManager.patchTaskToFailure(
                new IllegalStateException("Only 1 configuration file allowed"));
        return;
    }
    // This is a single disk state with a bootConfig. There is no expectation that it
    // exists, but if it does, only cloud configs are supported at this point.
    String cloudConfig = null;
    if (bootDisk.bootConfig != null
            && bootDisk.bootConfig.files.length > CLOUD_CONFIG_DEFAULT_FILE_INDEX) {
        cloudConfig = bootDisk.bootConfig.files[CLOUD_CONFIG_DEFAULT_FILE_INDEX].contents;
    }
    String instanceType = aws.child.description.instanceType;
    if (instanceType == null) {
        // fallback to legacy usage of name
        instanceType = aws.child.description.name;
    }
    if (instanceType == null) {
        aws.error = new IllegalStateException("AWS Instance type not specified");
        aws.stage = AWSInstanceStage.ERROR;
        handleAllocation(aws);
        return;
    }
    RunInstancesRequest runInstancesRequest = new RunInstancesRequest()
            .withImageId(aws.bootDiskImageNativeId)
            .withInstanceType(instanceType)
            .withMinCount(1)
            .withMaxCount(1)
            .withMonitoring(true)
            .withTagSpecifications(new TagSpecification()
                    .withResourceType(ResourceType.Instance)
                    .withTags(aws.getAWSTags()));
    if (aws.placement != null) {
        runInstancesRequest.withPlacement(new Placement(aws.placement));
    }
    if (aws.child.customProperties != null
            && aws.child.customProperties.containsKey(CUSTOM_PROP_SSH_KEY_NAME)) {
        runInstancesRequest = runInstancesRequest
                .withKeyName(aws.child.customProperties.get(CUSTOM_PROP_SSH_KEY_NAME));
    }
    if (!aws.dataDisks.isEmpty() || bootDisk.capacityMBytes > 0
            || bootDisk.customProperties != null) {
        DescribeImagesRequest imagesDescriptionRequest = new DescribeImagesRequest();
        imagesDescriptionRequest.withImageIds(aws.bootDiskImageNativeId);
        DescribeImagesResult imagesDescriptionResult =
                aws.amazonEC2Client.describeImages(imagesDescriptionRequest);
        if (imagesDescriptionResult.getImages().size() != 1) {
            handleError(aws, new IllegalStateException("AWS ImageId is not available"));
            return;
        }
        Image image = imagesDescriptionResult.getImages().get(0);
        AssertUtil.assertNotNull(aws.instanceTypeInfo, "instanceType cannot be null");
        List<BlockDeviceMapping> blockDeviceMappings = image.getBlockDeviceMappings();
        String rootDeviceType = image.getRootDeviceType();
        String bootDiskType = bootDisk.customProperties.get(DEVICE_TYPE);
        boolean hasHardConstraint = containsHardConstraint(bootDisk);
        BlockDeviceMapping rootDeviceMapping = null;
        try {
            // The number of instance-store disks that will be provisioned is limited
            // by the instance-type.
            suppressExcessInstanceStoreDevices(blockDeviceMappings, aws.instanceTypeInfo);
            for (BlockDeviceMapping blockDeviceMapping : blockDeviceMappings) {
                EbsBlockDevice ebs = blockDeviceMapping.getEbs();
                String diskType = getDeviceType(ebs);
                if (hasHardConstraint) {
                    validateIfDeviceTypesAreMatching(diskType, bootDiskType);
                }
                if (blockDeviceMapping.getNoDevice() != null) {
                    continue;
                }
                if (rootDeviceType.equals(AWSStorageType.EBS.getName())
                        && blockDeviceMapping.getDeviceName().equals(image.getRootDeviceName())) {
                    rootDeviceMapping = blockDeviceMapping;
                    continue;
                }
                DiskState diskState = new DiskState();
                copyCustomProperties(diskState, bootDisk);
                addMandatoryProperties(diskState, blockDeviceMapping, aws);
                updateDeviceMapping(diskType, bootDiskType,
                        blockDeviceMapping.getDeviceName(), ebs, diskState);
                // update disk state with final volume-type and iops
                if (diskType.equals(AWSStorageType.EBS.getName())) {
                    diskState.customProperties.put(VOLUME_TYPE, ebs.getVolumeType());
                    diskState.customProperties.put(DISK_IOPS, String.valueOf(ebs.getIops()));
                }
                aws.imageDisks.add(diskState);
            }
            customizeBootDiskProperties(bootDisk, rootDeviceType, rootDeviceMapping,
                    hasHardConstraint, aws);
            List<DiskState> ebsDisks = new ArrayList<>();
            List<DiskState> instanceStoreDisks = new ArrayList<>();
            if (!aws.dataDisks.isEmpty()) {
                if (!rootDeviceType.equals(AWSStorageType.EBS.name().toLowerCase())) {
                    instanceStoreDisks = aws.dataDisks;
                    assertAndResetPersistence(instanceStoreDisks);
                    validateSupportForAdditionalInstanceStoreDisks(instanceStoreDisks,
                            blockDeviceMappings, aws.instanceTypeInfo, rootDeviceType);
                } else {
                    splitDataDisks(aws.dataDisks, instanceStoreDisks, ebsDisks);
                    setEbsDefaultsIfNotSpecified(ebsDisks, Boolean.FALSE);
                    if (!instanceStoreDisks.isEmpty()) {
                        assertAndResetPersistence(instanceStoreDisks);
                        validateSupportForAdditionalInstanceStoreDisks(instanceStoreDisks,
                                blockDeviceMappings, aws.instanceTypeInfo, rootDeviceType);
                    }
                }
            }
            // get the available attach paths for new disks and external disks
            List<String> usedDeviceNames = null;
            if (!instanceStoreDisks.isEmpty() || !ebsDisks.isEmpty()
                    || !aws.externalDisks.isEmpty()) {
                usedDeviceNames = getUsedDeviceNames(blockDeviceMappings);
            }
            if (!instanceStoreDisks.isEmpty()) {
                List<String> usedVirtualNames = getUsedVirtualNames(blockDeviceMappings);
                blockDeviceMappings.addAll(createInstanceStoreMappings(instanceStoreDisks,
                        usedDeviceNames, usedVirtualNames, aws.instanceTypeInfo.id,
                        aws.instanceTypeInfo.dataDiskSizeInMB, image.getPlatform(),
                        image.getVirtualizationType()));
            }
            if (!ebsDisks.isEmpty() || !aws.externalDisks.isEmpty()) {
                aws.availableEbsDiskNames = AWSBlockDeviceNameMapper.getAvailableNames(
                        AWSSupportedOS.get(image.getPlatform()),
                        AWSSupportedVirtualizationTypes.get(image.getVirtualizationType()),
                        AWSStorageType.EBS, instanceType, usedDeviceNames);
            }
            if (!ebsDisks.isEmpty()) {
                blockDeviceMappings.addAll(
                        createEbsDeviceMappings(ebsDisks, aws.availableEbsDiskNames));
            }
            runInstancesRequest.withBlockDeviceMappings(blockDeviceMappings);
        } catch (Exception e) {
            aws.error = e;
            aws.stage = AWSInstanceStage.ERROR;
            handleAllocation(aws);
            return;
        }
    }
    AWSNicContext primaryNic = aws.getPrimaryNic();
    if (primaryNic != null && primaryNic.nicSpec != null) {
        runInstancesRequest.withNetworkInterfaces(primaryNic.nicSpec);
    } else {
        runInstancesRequest.withSecurityGroupIds(AWSUtils.getOrCreateSecurityGroups(aws, null));
    }
    if (cloudConfig != null) {
        try {
            runInstancesRequest.setUserData(
                    Base64.getEncoder().encodeToString(cloudConfig.getBytes(Utils.CHARSET)));
        } catch (UnsupportedEncodingException e) {
            handleError(aws, new IllegalStateException("Error encoding user data"));
            return;
        }
    }
    String message = "[AWSInstanceService] Sending run instance request for instance id: "
            + aws.bootDiskImageNativeId + ", instance type: " + instanceType
            + ", parent task id: " + aws.computeRequest.taskReference;
    this.logInfo(() -> message);
    // handler invoked once the EC2 runInstancesAsync command completes
    AsyncHandler<RunInstancesRequest, RunInstancesResult> creationHandler =
            new AWSCreationHandler(this, aws);
    aws.amazonEC2Client.runInstancesAsync(runInstancesRequest, creationHandler);
}
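The only piece of the boot disk that reaches EC2 directly is its single cloud-config file: the contents are Base64-encoded and passed as instance user data. Below is a minimal sketch of that path, assuming the nested BootConfig and FileEntry types referenced by bootDisk.bootConfig.files[...].contents above; the cloud-config payload, image id, and instance type are made-up values, and StandardCharsets.UTF_8 stands in for the Utils.CHARSET string the service uses with the checked-exception getBytes overload.

DiskState bootDisk = new DiskState();
bootDisk.bootConfig = new DiskState.BootConfig();
DiskState.BootConfig.FileEntry file = new DiskState.BootConfig.FileEntry();
file.contents = "#cloud-config\npackages:\n  - nginx";   // illustrative cloud-config
bootDisk.bootConfig.files = new DiskState.BootConfig.FileEntry[] { file };

// only the file at CLOUD_CONFIG_DEFAULT_FILE_INDEX is read; more than one file is rejected
String cloudConfig = bootDisk.bootConfig.files[CLOUD_CONFIG_DEFAULT_FILE_INDEX].contents;
String userData = Base64.getEncoder()
        .encodeToString(cloudConfig.getBytes(StandardCharsets.UTF_8));

RunInstancesRequest request = new RunInstancesRequest()
        .withImageId("ami-12345678")     // illustrative image id
        .withInstanceType("t2.micro")    // illustrative instance type
        .withMinCount(1)
        .withMaxCount(1);
request.setUserData(userData);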
Use of com.vmware.photon.controller.model.resources.DiskService.DiskState in project photon-model by vmware.
The class AzureDiskEnumerationAdapterService, method createLocalDiskState.
/**
* Construct a DiskState object from an Azure Disk.
*/
private DiskState createLocalDiskState(DiskEnumContext ctx, Disk disk) {
    DiskState diskState = new DiskState();
    String id = UUID.randomUUID().toString();
    diskState.documentSelfLink = UriUtils.buildUriPath(DiskService.FACTORY_LINK, id);
    diskState.name = disk.name;
    diskState.id = disk.id;
    diskState.capacityMBytes = (long) disk.properties.diskSizeGB * 1024;
    diskState.status = DiskStatus.AVAILABLE;
    diskState.tenantLinks = ctx.parentCompute.tenantLinks;
    diskState.resourcePoolLink = ctx.request.resourcePoolLink;
    diskState.computeHostLink = ctx.parentCompute.documentSelfLink;
    diskState.authCredentialsLink = ctx.endpointAuth.documentSelfLink;
    diskState.endpointLink = ctx.request.endpointLink;
    AdapterUtils.addToEndpointLinks(diskState, ctx.request.endpointLink);
    diskState.regionId = disk.location;
    if (diskState.tagLinks == null) {
        diskState.tagLinks = new HashSet<>();
    }
    // add internal type tags
    diskState.tagLinks.addAll(ctx.internalTagLinks);
    diskState.customProperties = new HashMap<>();
    diskState.customProperties.put(AZURE_MANAGED_DISK_TYPE, disk.properties.accountType);
    return diskState;
}
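A hypothetical usage of this factory method, mirroring the inventory-URI POST pattern the same adapter uses in createUpdateDiskStates below: build the local state for a newly discovered Azure disk and create it in the local document store. Here azureDisk stands for a Disk returned by the enumeration query and the completion handler body is illustrative.

DiskState localState = createLocalDiskState(ctx, azureDisk);
Operation.createPost(createInventoryUri(getHost(), DiskService.FACTORY_LINK))
        .setBody(localState)
        .setCompletion((op, ex) -> {
            if (ex != null) {
                logWarning(() -> String.format("Error creating disk state: %s", ex.getMessage()));
            }
        })
        .sendWith(this);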
Use of com.vmware.photon.controller.model.resources.DiskService.DiskState in project photon-model by vmware.
The class AzureDiskEnumerationAdapterService, method createUpdateDiskStates.
private void createUpdateDiskStates(DiskEnumContext ctx, DiskEnumStages nextStage) {
    if (ctx.unattachedDisks.isEmpty()) {
        ctx.subStage = nextStage;
        handleSubStage(ctx);
        return;
    }
    Collection<Operation> opCollection = new ArrayList<>();
    ctx.unattachedDisks.entrySet().stream().forEach(entry -> {
        DiskState diskState = ctx.localDiskStates.get(entry.getKey());
        Operation diskOp = null;
        if (diskState != null) {
            diskState.status = DiskStatus.AVAILABLE;
            if (diskState.endpointLinks != null
                    && !diskState.endpointLinks.contains(ctx.request.endpointLink)) {
                AdapterUtils.addToEndpointLinks(diskState, ctx.request.endpointLink);
            }
            if (diskState.endpointLink == null || diskState.endpointLink.equals("")) {
                diskState.endpointLink = ctx.request.endpointLink;
            }
            if (diskState.customProperties != null
                    && diskState.customProperties.containsKey(DISK_CONTROLLER_NUMBER)) {
                diskState.customProperties.remove(DISK_CONTROLLER_NUMBER);
            }
            if (diskState.tagLinks == null) {
                diskState.tagLinks = new HashSet<>();
            }
            diskState.tagLinks.addAll(ctx.internalTagLinks);
            diskState.regionId = entry.getValue().location;
            diskOp = Operation.createPatch(createInventoryUri(getHost(), diskState.documentSelfLink))
                    .setBody(diskState);
        } else {
            diskState = createLocalDiskState(ctx, entry.getValue());
            diskOp = Operation.createPost(createInventoryUri(getHost(), DiskService.FACTORY_LINK))
                    .setBody(diskState);
        }
        ctx.localDiskStates.put(diskState.id, diskState);
        opCollection.add(diskOp);
    });
    if (opCollection.isEmpty()) {
        ctx.subStage = nextStage;
        handleSubStage(ctx);
        return;
    }
    logInfo(() -> "Create and/or update disk states in local document store.");
    OperationJoin.create(opCollection).setCompletion((ops, exs) -> {
        if (exs != null) {
            exs.values().forEach(ex ->
                    logWarning(() -> String.format("Error: %s", ex.getMessage())));
            return;
        }
        logFine(() -> String.format("Transition to " + nextStage));
        ctx.subStage = nextStage;
        handleSubStage(ctx);
    }).sendWith(this);
}
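The completion handler above only walks exs.values(). A minimal sketch of the same OperationJoin batching is shown below, with the completion maps keyed by operation id, so individual failures can be tied back to the operation that produced them; the handler body is illustrative, not the adapter's actual error handling.

OperationJoin.create(opCollection)
        .setCompletion((ops, exs) -> {
            if (exs != null && !exs.isEmpty()) {
                // ops and exs share the same operation-id keys
                exs.forEach((opId, ex) -> logWarning(() ->
                        String.format("Operation %d failed: %s", opId, ex.getMessage())));
                return;
            }
            logInfo(() -> String.format("%d disk states created or updated", ops.size()));
        })
        .sendWith(this);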
Use of com.vmware.photon.controller.model.resources.DiskService.DiskState in project photon-model by vmware.
The class AzureComputeEnumerationAdapterService, method updateDataDiskProperties.
private void updateDataDiskProperties(EnumerationContext ctx, VirtualMachineInner vm, Collection<Operation> opCollection) {
    if (vm.storageProfile() == null) {
        return;
    }
    if (vm.storageProfile().dataDisks() != null) {
        vm.storageProfile().dataDisks().forEach(dataDisk -> {
            DiskState diskToUpdate = null;
            if (AzureUtils.isDiskManaged(vm)) {
                diskToUpdate = ctx.diskStates.get(dataDisk.managedDisk().id());
            } else {
                diskToUpdate = ctx.diskStates.get(AzureUtils.canonizeId(dataDisk.vhd().uri()));
            }
            Operation diskToUpdateOp = null;
            if (null == diskToUpdate) {
                diskToUpdate = createDataDiskState(ctx, dataDisk, AzureUtils.isDiskManaged(vm));
                diskToUpdate.regionId = vm.location();
                diskToUpdateOp = Operation.createPost(getHost(), DiskService.FACTORY_LINK)
                        .setBody(diskToUpdate);
            } else {
                // case where an existing disk in Azure (also present in the local store)
                // is attached to the VM
                diskToUpdate.status = DiskService.DiskStatus.ATTACHED;
                diskToUpdate.regionId = vm.location();
                diskToUpdateOp = Operation.createPatch(getHost(), diskToUpdate.documentSelfLink)
                        .setBody(diskToUpdate);
            }
            opCollection.add(diskToUpdateOp);
            ctx.diskStates.put(diskToUpdate.id, diskToUpdate);
        });
    }
}
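The lookup key that decides between PATCH and POST differs by disk kind: managed data disks are matched by their Azure resource id, unmanaged ones by the canonicalized VHD blob URI. A small hypothetical helper making that choice explicit; DataDisk names the Azure SDK model type iterated in the loop above, and the helper itself is not part of the adapter.

private String dataDiskLookupKey(VirtualMachineInner vm, DataDisk dataDisk) {
    return AzureUtils.isDiskManaged(vm)
            ? dataDisk.managedDisk().id()
            : AzureUtils.canonizeId(dataDisk.vhd().uri());
}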