use of com.vmware.photon.controller.model.adapters.azure.constants.AzureConstants.DISK_CONTROLLER_NUMBER in project photon-model by vmware.
the class AzureInstanceService method updateDiskStates.
/**
 * Update {@code computeState.diskState[i].id} with the Azure disk's VHD URI
 * (unmanaged disks) or with the managed disk id (managed disks).
 */
private DeferredResult<AzureInstanceContext> updateDiskStates(AzureInstanceContext ctx) {
    if (ctx.provisionedVm == null) {
        // Do nothing.
        return DeferredResult.completed(ctx);
    }

    List<DeferredResult<Operation>> diskStateDRs = new ArrayList<>();

    // Update the boot DiskState with the Azure osDisk VHD URI (unmanaged disks)
    // or with the managed disk id (managed disks).
    {
        final OSDisk azureOsDisk = ctx.provisionedVm.storageProfile().osDisk();

        final DiskState diskStateToUpdate = new DiskState();
        diskStateToUpdate.documentSelfLink = ctx.bootDiskState.documentSelfLink;
        diskStateToUpdate.persistent = ctx.bootDiskState.persistent;
        diskStateToUpdate.regionId = ctx.provisionedVm.location();
        diskStateToUpdate.endpointLink = ctx.endpoint.documentSelfLink;
        AdapterUtils.addToEndpointLinks(diskStateToUpdate, ctx.endpoint.documentSelfLink);

        // The actual value being updated
        if (ctx.useManagedDisks()) {
            diskStateToUpdate.id = azureOsDisk.managedDisk().id();
        } else {
            diskStateToUpdate.id = AzureUtils.canonizeId(azureOsDisk.vhd().uri());
        }
        diskStateToUpdate.status = DiskService.DiskStatus.ATTACHED;

        Operation updateDiskState = Operation
                .createPatch(ctx.service, diskStateToUpdate.documentSelfLink)
                .setBody(diskStateToUpdate);

        DeferredResult<Operation> updateDR = ctx.service
                .sendWithDeferredResult(updateDiskState)
                .whenComplete((op, exc) -> {
                    if (exc != null) {
                        logSevere(() -> String.format(
                                "Updating boot DiskState [%s] with VHD URI [%s]: FAILED with %s",
                                ctx.bootDiskState.name, diskStateToUpdate.id, Utils.toString(exc)));
                    } else {
                        logFine(() -> String.format(
                                "Updating boot DiskState [%s] with VHD URI [%s]: SUCCESS",
                                ctx.bootDiskState.name, diskStateToUpdate.id));
                    }
                });
        diskStateDRs.add(updateDR);
    }

    for (DataDisk azureDataDisk : ctx.provisionedVm.storageProfile().dataDisks()) {
        // Find the corresponding DiskState by name
        Optional<DiskState> dataDiskOpt = ctx.dataDiskStates.stream()
                .map(DiskState.class::cast)
                .filter(dS -> azureDataDisk.name().equals(dS.name))
                .findFirst();
        Optional<DiskState> externalDiskOpt = ctx.externalDataDisks.stream()
                .map(DiskState.class::cast)
                .filter(dS -> azureDataDisk.name().equals(dS.name))
                .findFirst();

        // Update the existing disk state
        if (dataDiskOpt.isPresent()) {
            diskStateDRs.add(createDiskToUpdate(ctx, dataDiskOpt, azureDataDisk));
        } else if (externalDiskOpt.isPresent()) {
            diskStateDRs.add(createDiskToUpdate(ctx, externalDiskOpt, azureDataDisk));
        } else {
            // Additional disks were created by the custom image. They are always
            // managed disks. Create a new DiskState.
            DiskState diskStateToCreate = new DiskState();
            diskStateToCreate.id = azureDataDisk.managedDisk().id();
            diskStateToCreate.name = azureDataDisk.name();
            diskStateToCreate.regionId = ctx.provisionedVm.location();
            diskStateToCreate.customProperties = new HashMap<>();
            diskStateToCreate.customProperties.put(DISK_CONTROLLER_NUMBER,
                    String.valueOf(azureDataDisk.lun()));
            if (azureDataDisk.managedDisk().storageAccountType() != null) {
                diskStateToCreate.customProperties.put(AZURE_MANAGED_DISK_TYPE,
                        azureDataDisk.managedDisk().storageAccountType().toString());
            } else {
                // Default to Standard_LRS
                diskStateToCreate.customProperties.put(AZURE_MANAGED_DISK_TYPE,
                        StorageAccountTypes.STANDARD_LRS.toString());
            }
            diskStateToCreate.customProperties.put(AZURE_DATA_DISK_CACHING,
                    azureDataDisk.caching().toString());
            if (azureDataDisk.diskSizeGB() != null) {
                diskStateToCreate.capacityMBytes = azureDataDisk.diskSizeGB() * 1024;
            }
            diskStateToCreate.status = DiskService.DiskStatus.ATTACHED;
            diskStateToCreate.persistent = ctx.bootDiskState.persistent;
            diskStateToCreate.endpointLink = ctx.endpoint.documentSelfLink;
            AdapterUtils.addToEndpointLinks(diskStateToCreate, ctx.endpoint.documentSelfLink);

            Operation createDiskState = Operation
                    .createPost(ctx.service, DiskService.FACTORY_LINK)
                    .setBody(diskStateToCreate);

            DeferredResult<Operation> createDR = ctx.service
                    .sendWithDeferredResult(createDiskState)
                    .whenComplete((op, exc) -> {
                        if (exc != null) {
                            logSevere(() -> String.format(
                                    "Creating data DiskState [%s] with disk id [%s]: FAILED with %s",
                                    azureDataDisk.name(), azureDataDisk.managedDisk().id(),
                                    Utils.toString(exc)));
                        } else {
                            logFine(() -> String.format(
                                    "Creating data DiskState [%s] with disk id [%s]: SUCCESS",
                                    azureDataDisk.name(), azureDataDisk.managedDisk().id()));

                            // Update the compute state with the data disks present on the custom image
                            ComputeState cs = new ComputeState();
                            List<String> disksLinks = new ArrayList<>();
                            DiskState diskState = op.getBody(DiskState.class);
                            disksLinks.add(diskState.documentSelfLink);
                            cs.diskLinks = disksLinks;

                            Operation.CompletionHandler completionHandler = (ox, ex) -> {
                                if (ex != null) {
                                    handleError(ctx, ex);
                                    return;
                                }
                            };
                            sendRequest(Operation
                                    .createPatch(ctx.computeRequest.resourceReference)
                                    .setBody(cs)
                                    .setCompletion(completionHandler));
                        }
                    });
            diskStateDRs.add(createDR);
        }
    }

    return DeferredResult.allOf(diskStateDRs).thenApply(ignored -> ctx);
}
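
The createDiskToUpdate helper used in the two isPresent() branches above is not part of this listing. Below is a minimal sketch of what such a helper could look like, assuming it simply patches the matched DiskState with the same managed/unmanaged id handling and DISK_CONTROLLER_NUMBER bookkeeping as the creation branch; the field choices are illustrative, not the project's actual implementation.

// Hypothetical sketch only: the real createDiskToUpdate in AzureInstanceService is not shown
// on this page. This version mirrors the creation branch above for an already-known DiskState.
private DeferredResult<Operation> createDiskToUpdate(AzureInstanceContext ctx,
        Optional<DiskState> matchedDiskOpt, DataDisk azureDataDisk) {
    DiskState diskToUpdate = new DiskState();
    diskToUpdate.documentSelfLink = matchedDiskOpt.get().documentSelfLink;
    diskToUpdate.regionId = ctx.provisionedVm.location();
    diskToUpdate.status = DiskService.DiskStatus.ATTACHED;
    diskToUpdate.customProperties = new HashMap<>();
    // Record the LUN so enumeration and tests can correlate the disk with its controller slot.
    diskToUpdate.customProperties.put(DISK_CONTROLLER_NUMBER,
            String.valueOf(azureDataDisk.lun()));
    if (ctx.useManagedDisks()) {
        diskToUpdate.id = azureDataDisk.managedDisk().id();
    } else {
        diskToUpdate.id = AzureUtils.canonizeId(azureDataDisk.vhd().uri());
    }
    Operation patch = Operation.createPatch(ctx.service, diskToUpdate.documentSelfLink)
            .setBody(diskToUpdate);
    return ctx.service.sendWithDeferredResult(patch);
}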
use of com.vmware.photon.controller.model.adapters.azure.constants.AzureConstants.DISK_CONTROLLER_NUMBER in project photon-model by vmware.
the class TestAzureProvisionTask method testProvisionDataDisksAndEnumeration.
/**
 * Creates an Azure instance with 2 data disks via the provision task, enumerates the
 * disks, and verifies that disk states are not duplicated in the local store.
 */
@Test
public void testProvisionDataDisksAndEnumeration() throws Throwable {
    ImageSource imageSource = createImageSource(getHost(), this.endpointState, IMAGE_REFERENCE);

    // Create an Azure VM compute resource spec with 2 additional disks.
    int numberOfAdditionalDisks = 2;
    VMResourceSpec vmResourceSpec = new VMResourceSpec(getHost(), this.computeHost,
            this.endpointState, azureVMName)
            .withImageSource(imageSource)
            .withNicSpecs(DEFAULT_NIC_SPEC)
            .withNumberOfAdditionalDisks(numberOfAdditionalDisks)
            .withManagedDisk(false);

    // Create the Azure VM compute resource.
    this.vmState = createVMResourceFromSpec(vmResourceSpec);

    kickOffProvisionTask();
    runEnumeration();

    // Assert that the 2 additional disks were created
    List<DiskState> diskStates = this.vmState.diskLinks.stream()
            .map(diskLink -> getHost().getServiceState(null, DiskState.class,
                    UriUtils.buildUri(getHost(), diskLink)))
            .collect(Collectors.toList());
    for (DiskState diskState : diskStates) {
        if (diskState.bootOrder == 1) {
            assertEquals("OS Disk size does not match",
                    AzureTestUtil.AZURE_CUSTOM_OSDISK_SIZE, diskState.capacityMBytes);
        } else {
            assertEquals("Data Disk size does not match",
                    AzureTestUtil.AZURE_CUSTOM_DATA_DISK_SIZE, diskState.capacityMBytes);
            if (!this.isMock) {
                assertNotNull(diskState.customProperties);
                assertNotNull(diskState.customProperties.get(DISK_CONTROLLER_NUMBER));
            }
        }
    }

    // Run enumeration a second time to verify disk states are not duplicated
    runEnumeration();
    ServiceDocumentQueryResult result = ProvisioningUtils.queryAllFactoryResources(this.host,
            DiskService.FACTORY_LINK);
    List<DiskState> diskList = result.documents.keySet().stream()
            .map(diskLink -> getHost().getServiceState(null, DiskState.class,
                    UriUtils.buildUri(getHost(), diskLink)))
            .collect(Collectors.toList());
    for (DiskState diskState : diskStates) {
        long nameCount = diskList.stream()
                .filter(ds -> ds.name.equalsIgnoreCase(diskState.name))
                .count();
        String msg = String.format("Duplicate of DiskState %s must not be present.", diskState.name);
        assertEquals(msg, 1, nameCount);

        long idCount = diskList.stream()
                .filter(ds -> ds.id.equalsIgnoreCase(diskState.id))
                .count();
        String idMsg = String.format("Duplicate of DiskState ID %s must not be present.", diskState.id);
        assertEquals(idMsg, 1, idCount);
    }
}
use of com.vmware.photon.controller.model.adapters.azure.constants.AzureConstants.DISK_CONTROLLER_NUMBER in project photon-model by vmware.
the class TestAzureProvisionTask method assertConfigurationOfDisks.
private void assertConfigurationOfDisks(int numberOfAdditionalDisks, int numberOfDataDisksOnImage) {
    ComputeState vm = getHost().getServiceState(null, ComputeState.class,
            UriUtils.buildUri(getHost(), this.vmState.documentSelfLink));
    List<DiskState> diskStates = vm.diskLinks.stream()
            .map(diskLink -> getHost().getServiceState(null, DiskState.class,
                    UriUtils.buildUri(getHost(), diskLink)))
            .collect(Collectors.toList());

    if (numberOfDataDisksOnImage == 0) {
        for (DiskState diskState : diskStates) {
            if (diskState.bootOrder == 1) {
                assertEquals("OS Disk size does not match",
                        AzureTestUtil.AZURE_CUSTOM_OSDISK_SIZE, diskState.capacityMBytes);
            } else {
                assertEquals("Data Disk size does not match",
                        AzureTestUtil.AZURE_CUSTOM_DATA_DISK_SIZE, diskState.capacityMBytes);
                assertNotNull(diskState.customProperties);
                assertNotNull(diskState.customProperties.get(DISK_CONTROLLER_NUMBER));
            }
        }
    }

    if (this.isMock) {
        // Return. Nothing to check on Azure.
        return;
    }

    final String vmRGName = vm.customProperties.get(ComputeProperties.RESOURCE_GROUP_NAME);

    VirtualMachineInner provisionedVM = null;
    try {
        provisionedVM = AzureTestUtil.getAzureVirtualMachine(
                getAzureSdkClients().getComputeManagementClientImpl(),
                vmRGName,
                this.vmState.name.replace('_', '-'));
    } catch (Exception e) {
        fail("Unable to get Azure VM details: " + e.getMessage());
    }

    final Function<String, Optional<DiskState>> findDiskStateByName = diskName -> diskStates.stream()
            .filter(dS -> diskName.equals(dS.name))
            .findFirst();

    // Validate boot DiskState against Azure osDisk
    {
        final OSDisk azureOsDisk = provisionedVM.storageProfile().osDisk();

        Optional<DiskState> bootDiskOpt = findDiskStateByName.apply(azureOsDisk.name());
        if (bootDiskOpt.isPresent()) {
            final DiskState bootDiskState = bootDiskOpt.get();
            assertNotNull("Azure OS Disk with name '" + azureOsDisk.name()
                    + "' does not match any DiskState by name", bootDiskState);
            if (bootDiskState.customProperties != null
                    && bootDiskState.customProperties.containsKey(AzureConstants.AZURE_MANAGED_DISK_TYPE)) {
                assertEquals("Boot DiskState.id does not match Azure managed disk id",
                        azureOsDisk.managedDisk().id(), bootDiskState.id);
            } else {
                assertEquals("Boot DiskState.id does not match Azure.osDisk.vhd.uri",
                        AzureUtils.canonizeId(azureOsDisk.vhd().uri()), bootDiskState.id);
            }
            assertEquals("OS Disk size of the VM in azure does not match with the intended size",
                    AzureTestUtil.AZURE_CUSTOM_OSDISK_SIZE, azureOsDisk.diskSizeGB() * 1024);
        } else {
            fail("Mismatch in boot disk name.");
        }
    }

    for (DataDisk azureDataDisk : provisionedVM.storageProfile().dataDisks()) {
        Optional<DiskState> dataDiskOpt = findDiskStateByName.apply(azureDataDisk.name());
        if (dataDiskOpt.isPresent()) {
            DiskState dataDiskState = dataDiskOpt.get();
            assertNotNull("Azure Data Disk with name '" + azureDataDisk.name()
                    + "' does not match any DiskState by name", dataDiskState);
            if (dataDiskState.customProperties != null
                    && dataDiskState.customProperties.containsKey(AzureConstants.AZURE_MANAGED_DISK_TYPE)) {
                assertEquals("Data Disk State id does not match Azure managed disk id.",
                        azureDataDisk.managedDisk().id(), dataDiskState.id);
            } else {
                assertEquals("Data Disk State id does not match Azure DataDisk.vhd.uri",
                        AzureUtils.canonizeId(azureDataDisk.vhd().uri()), dataDiskState.id);
            }
            // Assert the size of each of the attached disks only in case of a public image
            if (numberOfDataDisksOnImage == 0) {
                assertEquals("Mismatch in intended size of data disks " + azureDataDisk.name(),
                        AZURE_CUSTOM_DATA_DISK_SIZE, azureDataDisk.diskSizeGB().longValue() * 1024);
            }
            assertEquals("LUN of DiskState does not match Azure.dataDisk.lun",
                    String.valueOf(azureDataDisk.lun()),
                    dataDiskState.customProperties.get(DISK_CONTROLLER_NUMBER));
        } else {
            fail("Data Disks not found.");
        }
    }

    assertEquals("Mismatch in number of data disks found on VM in azure",
            numberOfAdditionalDisks + numberOfDataDisksOnImage,
            provisionedVM.storageProfile().dataDisks().size());
}
use of com.vmware.photon.controller.model.adapters.azure.constants.AzureConstants.DISK_CONTROLLER_NUMBER in project photon-model by vmware.
the class AzureDiskEnumerationAdapterService method createUpdateDiskStates.
private void createUpdateDiskStates(DiskEnumContext ctx, DiskEnumStages nextStage) {
    if (ctx.unattachedDisks.isEmpty()) {
        ctx.subStage = nextStage;
        handleSubStage(ctx);
        return;
    }

    Collection<Operation> opCollection = new ArrayList<>();
    ctx.unattachedDisks.entrySet().stream().forEach(entry -> {
        DiskState diskState = ctx.localDiskStates.get(entry.getKey());
        Operation diskOp = null;
        if (diskState != null) {
            diskState.status = DiskStatus.AVAILABLE;
            if (diskState.endpointLinks != null
                    && !diskState.endpointLinks.contains(ctx.request.endpointLink)) {
                AdapterUtils.addToEndpointLinks(diskState, ctx.request.endpointLink);
            }
            if (diskState.endpointLink == null || diskState.endpointLink.equals("")) {
                diskState.endpointLink = ctx.request.endpointLink;
            }
            // The disk is no longer attached, so drop its controller (LUN) number.
            if (diskState.customProperties != null
                    && diskState.customProperties.containsKey(DISK_CONTROLLER_NUMBER)) {
                diskState.customProperties.remove(DISK_CONTROLLER_NUMBER);
            }
            if (diskState.tagLinks == null) {
                diskState.tagLinks = new HashSet<>();
            }
            diskState.tagLinks.addAll(ctx.internalTagLinks);
            diskState.regionId = entry.getValue().location;
            diskOp = Operation.createPatch(createInventoryUri(getHost(), diskState.documentSelfLink))
                    .setBody(diskState);
        } else {
            diskState = createLocalDiskState(ctx, entry.getValue());
            diskOp = Operation.createPost(createInventoryUri(getHost(), DiskService.FACTORY_LINK))
                    .setBody(diskState);
        }
        ctx.localDiskStates.put(diskState.id, diskState);
        opCollection.add(diskOp);
    });

    if (opCollection.isEmpty()) {
        ctx.subStage = nextStage;
        handleSubStage(ctx);
        return;
    }

    logInfo(() -> "Create and/or update disk states in the local document store.");
    OperationJoin.create(opCollection)
            .setCompletion((ops, exs) -> {
                if (exs != null) {
                    exs.values().forEach(ex ->
                            logWarning(() -> String.format("Error: %s", ex.getMessage())));
                    return;
                }
                logFine(() -> String.format("Transition to %s", nextStage));
                ctx.subStage = nextStage;
                handleSubStage(ctx);
            })
            .sendWith(this);
}
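
The createLocalDiskState helper called in the else branch is likewise not included in this listing. A rough sketch of the fields such a helper would need to populate for a newly discovered, unattached disk is shown below; the parameter type name and its id/name/location fields are assumptions, consistent only with the entry.getValue().location access in the caller, not the project's actual model class.

// Hypothetical sketch only: the real createLocalDiskState in AzureDiskEnumerationAdapterService
// is not shown on this page. It builds a DiskState for a disk found in Azure but not yet present
// in the local store; DISK_CONTROLLER_NUMBER is intentionally absent, since the disk is unattached.
private DiskState createLocalDiskState(DiskEnumContext ctx, Disk azureDisk) {
    DiskState diskState = new DiskState();
    diskState.id = azureDisk.id;              // assumed fields on the enumerated Azure disk model
    diskState.name = azureDisk.name;
    diskState.regionId = azureDisk.location;
    diskState.status = DiskStatus.AVAILABLE;  // unattached disks are surfaced as AVAILABLE
    diskState.endpointLink = ctx.request.endpointLink;
    AdapterUtils.addToEndpointLinks(diskState, ctx.request.endpointLink);
    diskState.tagLinks = new HashSet<>(ctx.internalTagLinks);
    return diskState;
}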