Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
The class VmwareResource, method execute(DestroyCommand).
@Override
public Answer execute(DestroyCommand cmd) {
try {
VmwareContext context = getServiceContext(null);
VmwareHypervisorHost hyperHost = getHyperHost(context, null);
VolumeTO vol = cmd.getVolume();
VirtualMachineMO vmMo = findVmOnDatacenter(context, hyperHost, vol);
if (vmMo != null) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Destroy template volume " + vol.getPath());
}
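// If the volume maps to a template, convert it back to a regular VM first, then destroy it.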
if (vmMo.isTemplate()) {
vmMo.markAsVirtualMachine(hyperHost.getHyperHostOwnerResourcePool(), hyperHost.getMor());
}
vmMo.destroy();
} else {
if (s_logger.isInfoEnabled()) {
s_logger.info("Template volume " + vol.getPath() + " is not found, no need to delete.");
}
}
return new Answer(cmd, true, "Success");
} catch (Throwable e) {
return new Answer(cmd, false, createLogMessageException(e, cmd));
}
}
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
The class VmwareResource, method execute(StartCommand).
protected StartAnswer execute(StartCommand cmd) {
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
boolean vmAlreadyExistsInVcenter = false;
String existingVmName = null;
VirtualMachineFileInfo existingVmFileInfo = null;
VirtualMachineFileLayoutEx existingVmFileLayout = null;
List<DatastoreMO> existingDatastores = new ArrayList<DatastoreMO>();
String diskStoragePolicyId = null;
String vmStoragePolicyId = null;
VirtualMachineDefinedProfileSpec diskProfileSpec = null;
VirtualMachineDefinedProfileSpec vmProfileSpec = null;
DeployAsIsInfoTO deployAsIsInfo = vmSpec.getDeployAsIsInfo();
boolean deployAsIs = deployAsIsInfo != null;
Pair<String, String> names = composeVmNames(vmSpec);
String vmInternalCSName = names.first();
String vmNameOnVcenter = names.second();
DiskTO rootDiskTO = null;
String bootMode = getBootModeFromVmSpec(vmSpec, deployAsIs);
Pair<String, String> controllerInfo = getControllerInfoFromVmSpec(vmSpec);
Boolean systemVm = vmSpec.getType().isUsedBySystem();
// vmInternalCSName always holds i-x-y, the CloudStack-generated internal VM name.
VmwareContext context = getServiceContext();
DatacenterMO dcMo = null;
try {
VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
VmwareHypervisorHost hyperHost = getHyperHost(context);
dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
// Validate VM name is unique in Datacenter
VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName);
if (vmInVcenter != null) {
vmAlreadyExistsInVcenter = true;
String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter.";
s_logger.error(msg);
throw new Exception(msg);
}
DiskTO[] specDisks = vmSpec.getDisks();
String guestOsId = getGuestOsIdFromVmSpec(vmSpec, deployAsIs);
DiskTO[] disks = validateDisks(vmSpec.getDisks());
assert (disks.length > 0);
NicTO[] nics = vmSpec.getNics();
HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> dataStoresDetails = inferDatastoreDetailsFromDiskInfo(hyperHost, context, disks, cmd);
if ((dataStoresDetails == null) || (dataStoresDetails.isEmpty())) {
String msg = "Unable to locate datastore details of the volumes to be attached";
s_logger.error(msg);
throw new Exception(msg);
}
VirtualMachineDiskInfoBuilder diskInfoBuilder = null;
VirtualDevice[] nicDevices = null;
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
DiskControllerType systemVmScsiControllerType = DiskControllerType.lsilogic;
int firstScsiControllerBusNum = 0;
int numScsiControllerForSystemVm = 1;
boolean hasSnapshot = false;
List<Pair<Integer, ManagedObjectReference>> diskDatastores = null;
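// The VM may already exist on this host, on a peer host in the same cluster, or registered under the same name elsewhere in the datacenter; each case is handled below.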
if (vmMo != null) {
s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration");
if (getVmPowerState(vmMo) != PowerState.PowerOff)
vmMo.safePowerOff(_shutdownWaitMs);
// retrieve disk information before we tear down
diskDatastores = vmMo.getAllDiskDatastores();
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
nicDevices = vmMo.getNicDevices();
tearDownVmDevices(vmMo, hasSnapshot, deployAsIs);
ensureDiskControllersInternal(vmMo, systemVm, controllerInfo, systemVmScsiControllerType, numScsiControllerForSystemVm, firstScsiControllerBusNum, deployAsIs);
} else {
ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter();
assert (morDc != null);
vmMo = hyperHost.findVmOnPeerHyperHost(vmInternalCSName);
if (vmMo != null) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Found VM " + vmInternalCSName + " on another host, relocating it to " + hyperHost.getHyperHostName());
}
takeVmFromOtherHyperHost(hyperHost, vmInternalCSName);
if (getVmPowerState(vmMo) != PowerState.PowerOff)
vmMo.safePowerOff(_shutdownWaitMs);
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
diskDatastores = vmMo.getAllDiskDatastores();
tearDownVmDevices(vmMo, hasSnapshot, deployAsIs);
ensureDiskControllersInternal(vmMo, systemVm, controllerInfo, systemVmScsiControllerType, numScsiControllerForSystemVm, firstScsiControllerBusNum, deployAsIs);
} else {
// If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration).
VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName);
if (existingVmInDc != null) {
s_logger.debug("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the existing VM.");
existingVmName = existingVmInDc.getName();
existingVmFileInfo = existingVmInDc.getFileInfo();
existingVmFileLayout = existingVmInDc.getFileLayout();
existingDatastores = existingVmInDc.getAllDatastores();
existingVmInDc.unregisterVm();
}
if (deployAsIs) {
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo == null) {
s_logger.info("Cloned deploy-as-is VM " + vmInternalCSName + " is not on this host, relocating it");
vmMo = takeVmFromOtherHyperHost(hyperHost, vmInternalCSName);
}
} else {
DiskTO rootDisk = null;
for (DiskTO vol : disks) {
if (vol.getType() == Volume.Type.ROOT) {
rootDisk = vol;
}
}
Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails = getDatastoreThatDiskIsOn(dataStoresDetails, rootDisk);
assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null);
DatastoreMO dsRootVolumeIsOn = rootDiskDataStoreDetails.second();
if (dsRootVolumeIsOn == null) {
String msg = "Unable to locate datastore details of root volume";
s_logger.error(msg);
throw new Exception(msg);
}
if (rootDisk.getDetails().get(DiskTO.PROTOCOL_TYPE) != null && rootDisk.getDetails().get(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase(Storage.StoragePoolType.DatastoreCluster.toString())) {
if (diskInfoBuilder != null) {
DatastoreMO diskDatastoreMofromVM = getDataStoreWhereDiskExists(hyperHost, context, diskInfoBuilder, rootDisk, diskDatastores);
if (diskDatastoreMofromVM != null) {
String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID);
if (!actualPoolUuid.equalsIgnoreCase(rootDisk.getData().getDataStore().getUuid())) {
dsRootVolumeIsOn = diskDatastoreMofromVM;
}
}
}
}
boolean vmFolderExists = dsRootVolumeIsOn.folderExists(String.format("[%s]", dsRootVolumeIsOn.getName()), vmNameOnVcenter);
String vmxFileFullPath = dsRootVolumeIsOn.searchFileInSubFolders(vmNameOnVcenter + ".vmx", false, VmwareManager.s_vmwareSearchExcludeFolder.value());
if (vmFolderExists && vmxFileFullPath != null) {
// VM can be registered only if .vmx is present.
registerVm(vmNameOnVcenter, dsRootVolumeIsOn);
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found registered vm " + vmInternalCSName + " at host " + hyperHost.getHyperHostName());
}
}
tearDownVm(vmMo);
} else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int) (vmSpec.getMaxRam() / ResourceType.bytesToMiB), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) {
throw new Exception("Failed to create VM. vmName: " + vmInternalCSName);
}
}
}
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo == null) {
throw new Exception("Failed to find the newly created or relocated VM. vmName: " + vmInternalCSName);
}
}
if (deployAsIs) {
s_logger.info("Mapping VM disks to spec disks and tearing down datadisks (if any)");
mapSpecDisksToClonedDisksAndTearDownDatadisks(vmMo, vmInternalCSName, specDisks);
}
int disksChanges = getDisksChangesNumberFromDisksSpec(disks, deployAsIs);
int totalChangeDevices = disksChanges + nics.length;
if (deployAsIsInfo != null && deployAsIsInfo.getProperties() != null) {
totalChangeDevices++;
}
DiskTO volIso = null;
if (vmSpec.getType() != VirtualMachine.Type.User) {
// system VM needs a patch ISO
totalChangeDevices++;
} else {
volIso = getIsoDiskTO(disks);
if (volIso == null && !deployAsIs) {
totalChangeDevices++;
}
}
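// Build the list of device changes (ISO/CDROM, disks, a USB controller where needed, and NICs) that will be applied in a single reconfigure call below.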
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
int i = 0;
int ideUnitNumber = !deployAsIs ? 0 : vmMo.getNextIDEDeviceNumber();
int scsiUnitNumber = !deployAsIs ? 0 : vmMo.getNextScsiDiskDeviceNumber();
int ideControllerKey = vmMo.getIDEDeviceControllerKey();
int scsiControllerKey = vmMo.getScsiDeviceControllerKeyNoException();
VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices];
DiskTO[] sortedDisks = sortVolumesByDeviceId(disks);
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), (int) (vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse(), deployAsIs);
// Check for multi-cores per socket settings
int numCoresPerSocket = 1;
String coresPerSocket = vmSpec.getDetails().get(VmDetailConstants.CPU_CORE_PER_SOCKET);
if (coresPerSocket != null) {
String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext());
// Property 'numCoresPerSocket' is supported since vSphere API 5.0
if (apiVersion.compareTo("5.0") >= 0) {
numCoresPerSocket = NumbersUtil.parseInt(coresPerSocket, 1);
vmConfigSpec.setNumCoresPerSocket(numCoresPerSocket);
}
}
// Check for hotadd settings
vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm());
String hostApiVersion = ((HostMO) hyperHost).getHostAboutInfo().getApiVersion();
if (numCoresPerSocket > 1 && hostApiVersion.compareTo("5.0") < 0) {
s_logger.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. Hence CpuHotAdd will not be" + " enabled for Virtual Machine: " + vmInternalCSName);
vmConfigSpec.setCpuHotAddEnabled(false);
} else {
vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm());
}
if (!vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()) {
s_logger.warn("hotadd of memory is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName);
}
if (!vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()) {
s_logger.warn("hotadd of cpu is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName);
}
configNestedHVSupport(vmMo, vmSpec, vmConfigSpec);
// prepare systemvm patch ISO
if (vmSpec.getType() != VirtualMachine.Type.User) {
// attach ISO (for patching of system VM)
Pair<String, Long> secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId));
String secStoreUrl = secStoreUrlAndId.first();
Long secStoreId = secStoreUrlAndId.second();
if (secStoreUrl == null) {
String msg = "secondary storage for dc " + _dcId + " is not ready yet?";
throw new Exception(msg);
}
mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId);
ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl);
if (morSecDs == null) {
String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
throw new Exception(msg);
}
DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs);
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, ideUnitNumber++, i + 1);
deviceConfigSpecArray[i].setDevice(isoInfo.first());
if (isoInfo.second()) {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
} else {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
}
i++;
} else if (!deployAsIs) {
// Note: we will always plug a CDROM device
if (volIso != null) {
for (DiskTO vol : disks) {
if (vol.getType() == Volume.Type.ISO) {
configureIso(hyperHost, vmMo, vol, deviceConfigSpecArray, ideUnitNumber++, i);
i++;
}
}
} else {
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, i + 1);
deviceConfigSpecArray[i].setDevice(isoInfo.first());
if (isoInfo.second()) {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
} else {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
}
i++;
}
}
int controllerKey;
//
if (multipleIsosAtached(sortedDisks) && deployAsIs) {
sortedDisks = getDisks(sortedDisks);
}
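// Attach each remaining volume, choosing an IDE or SCSI controller and the next free unit number on it; ISOs and deploy-as-is root disks are handled separately inside the loop.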
for (DiskTO vol : sortedDisks) {
if (vol.getType() == Volume.Type.ISO) {
if (deployAsIs) {
configureIso(hyperHost, vmMo, vol, deviceConfigSpecArray, ideUnitNumber++, i);
i++;
}
continue;
}
if (deployAsIs && vol.getType() == Volume.Type.ROOT) {
rootDiskTO = vol;
resizeRootDiskOnVMStart(vmMo, rootDiskTO, hyperHost, context);
continue;
}
VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context);
String diskController = getDiskController(vmMo, matchingExistingDisk, vol, controllerInfo, deployAsIs);
if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) {
diskController = vmMo.getRecommendedDiskController(null);
}
if (DiskControllerType.getType(diskController) == DiskControllerType.ide) {
controllerKey = vmMo.getIDEControllerKey(ideUnitNumber);
if (vol.getType() == Volume.Type.DATADISK) {
// Ensure a maximum of 2 data volumes over the IDE controller, 3 including the root volume
if (vmMo.getNumberOfVirtualDisks() > 3) {
throw new CloudRuntimeException("Found more than 3 virtual disks attached to this VM [" + vmMo.getVmName() + "]. Unable to implement the disks over " + diskController + " controller, as the maximum number of devices supported over the IDE controller is 4, including the CDROM device.");
}
}
} else {
if (VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) {
scsiUnitNumber++;
}
controllerKey = vmMo.getScsiDiskControllerKeyNoException(diskController, scsiUnitNumber);
if (controllerKey == -1) {
// This may happen for legacy ROOT VMs which don't have a recommended disk controller when the global configuration parameter 'vmware.root.disk.controller' is set to "osdefault"
// Retrieve the existing controller and use it.
Ternary<Integer, Integer, DiskControllerType> vmScsiControllerInfo = vmMo.getScsiControllerInfo();
DiskControllerType existingControllerType = vmScsiControllerInfo.third();
controllerKey = vmMo.getScsiDiskControllerKeyNoException(existingControllerType.toString(), scsiUnitNumber);
}
}
if (!hasSnapshot) {
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData();
DataStoreTO primaryStore = volumeTO.getDataStore();
Map<String, String> details = vol.getDetails();
boolean managed = false;
String iScsiName = null;
if (details != null) {
managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
iScsiName = details.get(DiskTO.IQN);
}
String primaryStoreUuid = primaryStore.getUuid();
// if the storage is managed, iScsiName should not be null
String datastoreName = managed ? VmwareResource.getDatastoreName(iScsiName) : primaryStoreUuid;
Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(datastoreName);
assert (volumeDsDetails != null);
if (volumeDsDetails == null) {
throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host.");
}
if (vol.getDetails().get(DiskTO.PROTOCOL_TYPE) != null && vol.getDetails().get(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase("DatastoreCluster")) {
if (diskInfoBuilder != null && matchingExistingDisk != null) {
String[] diskChain = matchingExistingDisk.getDiskChain();
if (diskChain != null && diskChain.length > 0) {
DatastoreFile file = new DatastoreFile(diskChain[0]);
if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) {
if (s_logger.isInfoEnabled())
s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName());
volumeTO.setPath(file.getFileBaseName());
}
}
DatastoreMO diskDatastoreMofromVM = getDataStoreWhereDiskExists(hyperHost, context, diskInfoBuilder, vol, diskDatastores);
if (diskDatastoreMofromVM != null) {
String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID);
if (actualPoolUuid != null && !actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) {
volumeDsDetails = new Pair<>(diskDatastoreMofromVM.getMor(), diskDatastoreMofromVM);
if (s_logger.isInfoEnabled())
s_logger.info("Detected datastore uuid change on volume: " + volumeTO.getId() + " " + primaryStore.getUuid() + " -> " + actualPoolUuid);
((PrimaryDataStoreTO) primaryStore).setUuid(actualPoolUuid);
}
}
}
}
String[] diskChain = syncDiskChain(dcMo, vmMo, vol, matchingExistingDisk, volumeDsDetails.second());
int deviceNumber = -1;
if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) {
deviceNumber = ideUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_IDE_CONTROLLER;
ideUnitNumber++;
} else {
deviceNumber = scsiUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_SCSI_CONTROLLER;
scsiUnitNumber++;
}
VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), deviceNumber, i + 1);
diskStoragePolicyId = volumeTO.getvSphereStoragePolicyId();
if (StringUtils.isNotEmpty(diskStoragePolicyId)) {
PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(context);
diskProfileSpec = profMgrMo.getProfileSpec(diskStoragePolicyId);
deviceConfigSpecArray[i].getProfile().add(diskProfileSpec);
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Adding vSphere storage profile: %s to virtual disk [%s]", diskStoragePolicyId, _gson.toJson(device)));
}
}
if (vol.getType() == Volume.Type.ROOT) {
rootDiskTO = vol;
vmStoragePolicyId = diskStoragePolicyId;
vmProfileSpec = diskProfileSpec;
}
deviceConfigSpecArray[i].setDevice(device);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare volume at new device " + _gson.toJson(device));
i++;
} else {
if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber))
ideUnitNumber++;
else
scsiUnitNumber++;
}
}
//
if (StringUtils.isNotBlank(guestOsId) && guestOsId.startsWith("darwin")) {
// Mac OS
VirtualDevice[] devices = vmMo.getMatchedDevices(new Class<?>[] { VirtualUSBController.class });
if (devices.length == 0) {
s_logger.debug("No USB Controller device on VM Start. Add USB Controller device for Mac OS VM " + vmInternalCSName);
// For Mac OS X systems, the EHCI+UHCI controller is enabled by default and is required for USB mouse and keyboard access.
VirtualDevice usbControllerDevice = VmwareHelper.prepareUSBControllerDevice();
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
deviceConfigSpecArray[i].setDevice(usbControllerDevice);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare USB controller at new device " + _gson.toJson(deviceConfigSpecArray[i]));
i++;
} else {
s_logger.debug("USB Controller device exists on VM Start for Mac OS VM " + vmInternalCSName);
}
}
//
// Setup NIC devices
//
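// One NIC device config spec is prepared per NicTO; nicMask records the first NICs allocated and is later stored in the CLOUD_NIC_MASK custom field.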
VirtualDevice nic;
int nicMask = 0;
int nicCount = 0;
VirtualEthernetCardType nicDeviceType;
NiciraNvpApiVersion.logNiciraApiVersion();
Map<String, String> nicUuidToDvSwitchUuid = new HashMap<String, String>();
for (NicTO nicTo : sortNicsByDeviceId(nics)) {
s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo));
String adapterTypeStr = deployAsIs ? mapAdapterType(deployAsIsInfo.getNicAdapterMap().get(nicTo.getDeviceId())) : vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER);
nicDeviceType = VirtualEthernetCardType.valueOf(adapterTypeStr);
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM " + vmInternalCSName + " will be started with NIC device type: " + nicDeviceType + " on NIC device " + nicTo.getDeviceId());
}
boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus"));
VirtualMachine.Type vmType = cmd.getVirtualMachine().getType();
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType);
if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) || (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) {
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
String dvSwitchUuid;
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor);
ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first());
dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor);
s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid);
nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), i + 1, true, true);
if (nicTo.getUuid() != null) {
nicUuidToDvSwitchUuid.put(nicTo.getUuid(), dvSwitchUuid);
}
} else {
s_logger.info("Preparing NIC device on network " + networkInfo.second());
nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), i + 1, true, true);
}
} else {
// If the NSX API version is >= 4.2, connect to br-int (nsx.network) and do not create a portgroup; otherwise keep the previous behaviour
nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(), nicTo.getMac(), i + 1, true, true);
}
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
deviceConfigSpecArray[i].setDevice(nic);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare NIC at new device " + _gson.toJson(deviceConfigSpecArray[i]));
// This is really a hack for DomR: upon DomR startup, we will reset all the NIC allocation after eth3
if (nicCount < 3)
nicMask |= (1 << nicCount);
i++;
nicCount++;
}
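// Apply all accumulated device change specs to the VM configuration spec.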
for (int j = 0; j < i; j++) vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]);
//
// Setup VM options
//
// pass boot arguments through machine.id & perform customized options to VMX
ArrayList<OptionValue> extraOptions = new ArrayList<OptionValue>();
configBasicExtraOption(extraOptions, vmSpec);
if (deployAsIs) {
setDeployAsIsProperties(vmMo, deployAsIsInfo, vmConfigSpec, hyperHost);
}
configNvpExtraOption(extraOptions, vmSpec, nicUuidToDvSwitchUuid);
configCustomExtraOption(extraOptions, vmSpec);
// config for NCC
VirtualMachine.Type vmType = cmd.getVirtualMachine().getType();
if (vmType.equals(VirtualMachine.Type.NetScalerVm)) {
NicTO mgmtNic = vmSpec.getNics()[0];
OptionValue option = new OptionValue();
option.setKey("machine.id");
option.setValue("ip=" + mgmtNic.getIp() + "&netmask=" + mgmtNic.getNetmask() + "&gateway=" + mgmtNic.getGateway());
extraOptions.add(option);
}
configureVNC(vmSpec, extraOptions, vmConfigSpec, hyperHost, vmInternalCSName);
// config video card
configureVideoCard(vmMo, vmSpec, vmConfigSpec);
setBootOptions(vmSpec, bootMode, vmConfigSpec);
if (StringUtils.isNotEmpty(vmStoragePolicyId)) {
vmConfigSpec.getVmProfile().add(vmProfileSpec);
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("Configuring the VM %s with storage policy: %s", vmInternalCSName, vmStoragePolicyId));
}
}
//
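// Reconfigure the VM with the accumulated spec before powering it on.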
if (!vmMo.configureVm(vmConfigSpec)) {
throw new Exception("Failed to configure VM before start. vmName: " + vmInternalCSName);
}
if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) {
hyperHost.setRestartPriorityForVM(vmMo, DasVmPriority.HIGH.value());
}
// Resizing root disk only when explicit requested by user
final Map<String, String> vmDetails = cmd.getVirtualMachine().getDetails();
if (!deployAsIs && rootDiskTO != null && !hasSnapshot && (vmDetails != null && vmDetails.containsKey(ApiConstants.ROOT_DISK_SIZE))) {
resizeRootDiskOnVMStart(vmMo, rootDiskTO, hyperHost, context);
}
//
// Post Configuration
//
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask));
postNvpConfigBeforeStart(vmMo, vmSpec);
Map<String, Map<String, String>> iqnToData = new HashMap<>();
postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToData, hyperHost, context);
//
if (!vmMo.powerOn()) {
throw new Exception("Failed to start VM. vmName: " + vmInternalCSName + " with hostname " + vmNameOnVcenter);
}
StartAnswer startAnswer = new StartAnswer(cmd);
startAnswer.setIqnToData(iqnToData);
// Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it.
if (existingVmName != null && existingVmFileLayout != null) {
List<String> vmDatastoreNames = new ArrayList<String>();
for (DatastoreMO vmDatastore : vmMo.getAllDatastores()) {
vmDatastoreNames.add(vmDatastore.getName());
}
// Don't delete files that are in a datastore that is being used by the new VM as well (zone-wide datastore).
List<String> skipDatastores = new ArrayList<String>();
for (DatastoreMO existingDatastore : existingDatastores) {
if (vmDatastoreNames.contains(existingDatastore.getName())) {
skipDatastores.add(existingDatastore.getName());
}
}
deleteUnregisteredVmFiles(existingVmFileLayout, dcMo, true, skipDatastores);
}
return startAnswer;
} catch (Throwable e) {
StartAnswer startAnswer = new StartAnswer(cmd, createLogMessageException(e, cmd));
if (vmAlreadyExistsInVcenter) {
startAnswer.setContextParam("stopRetry", "true");
}
if (existingVmName != null && existingVmFileInfo != null) {
s_logger.debug(String.format("Since VM start failed, registering back an existing VM: [%s] that was unregistered.", existingVmName));
try {
DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName());
DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
registerVm(existingVmName, existingVmDsMo);
} catch (Exception ex) {
String message = String.format("Failed to register an existing VM: [%s] due to [%s].", existingVmName, VmwareHelper.getExceptionMessage(ex));
s_logger.error(message, ex);
}
}
return startAnswer;
}
}
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
The class VmwareResource, method getCurrentStatus.
@Override
public PingCommand getCurrentStatus(long id) {
try {
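// Garbage-collect hung worker VMs left over from earlier operations before reporting host status.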
gcAndKillHungWorkerVMs();
VmwareContext context = getServiceContext();
VmwareHypervisorHost hyperHost = getHyperHost(context);
try {
if (!hyperHost.isHyperHostConnected()) {
return null;
}
} catch (Exception e) {
s_logger.error("Unexpected exception", e);
return null;
}
return new PingRoutingCommand(getType(), id, syncHostVmStates());
} finally {
recycleServiceContext();
}
}
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
The class VmwareResource, method migrateAndAnswer.
private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception {
String hostNameInTargetCluster = null;
List<Pair<VolumeTO, StorageFilerTO>> volToFiler = new ArrayList<>();
if (cmd instanceof MigrateVmToPoolCommand) {
MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand) cmd;
hostNameInTargetCluster = mcmd.getHostGuidInTargetCluster();
volToFiler = mcmd.getVolumeToFilerAsList();
} else if (cmd instanceof MigrateVolumeCommand) {
hostNameInTargetCluster = ((MigrateVolumeCommand) cmd).getHostGuidInTargetCluster();
}
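// Resolve the managed object for the host in the target cluster (if one was specified in the command).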
VmwareHypervisorHost hostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(), hostNameInTargetCluster);
try {
// OfflineVmwareMigration: getVolumesFromCommand(cmd);
Map<Integer, Long> volumeDeviceKey = new HashMap<>();
if (cmd instanceof MigrateVolumeCommand) {
// Else device keys will be found in relocateVirtualMachine
MigrateVolumeCommand mcmd = (MigrateVolumeCommand) cmd;
addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId());
if (s_logger.isTraceEnabled()) {
for (Integer diskId : volumeDeviceKey.keySet()) {
s_logger.trace(String.format("Disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId)));
}
}
}
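// relocateVirtualMachine performs the actual host/datastore relocation and returns the updated volume objects.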
List<VolumeObjectTO> volumeToList = relocateVirtualMachine(hyperHost, vmMo.getName(), null, null, hostInTargetCluster, poolUuid, volToFiler);
return createAnswerForCmd(vmMo, volumeToList, cmd, volumeDeviceKey);
} catch (Exception e) {
String msg = "Change data store for VM " + vmMo.getVmName() + " failed";
s_logger.error(msg + ": " + e.getLocalizedMessage());
throw new CloudRuntimeException(msg, e);
}
}
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
The class VmwareResource, method execute(MigrateVolumeCommand).
// OfflineVmwareMigration: refactor to be able to handle a detached volume
private Answer execute(MigrateVolumeCommand cmd) {
String volumePath = cmd.getVolumePath();
String chainInfo = cmd.getChainInfo();
StorageFilerTO poolTo = cmd.getPool();
VolumeObjectTO volumeObjectTO = (VolumeObjectTO) cmd.getSrcData();
String vmName = cmd.getAttachedVmName();
VirtualMachineMO vmMo = null;
VmwareHypervisorHost srcHyperHost = null;
// OfflineVmwareMigration: what if the host is null?
if (StringUtils.isBlank(cmd.getAttachedVmName())) {
return migrateVolume(cmd);
}
ManagedObjectReference morDs = null;
ManagedObjectReference morDc = null;
VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<VirtualMachineRelocateSpecDiskLocator>();
VirtualMachineRelocateSpecDiskLocator diskLocator = null;
String tgtDsName = "";
try {
srcHyperHost = getHyperHost(getServiceContext());
morDc = srcHyperHost.getHyperHostDatacenter();
tgtDsName = poolTo.getUuid();
// find VM in this datacenter not just in this cluster.
DatacenterMO dcMo = new DatacenterMO(getServiceContext(), morDc);
vmMo = dcMo.findVm(vmName);
if (vmMo == null) {
String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
s_logger.error(msg);
throw new CloudRuntimeException(msg);
}
vmName = vmMo.getName();
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
if (morDs == null) {
String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() + " to execute MigrateVolumeCommand";
s_logger.error(msg);
throw new Exception(msg);
}
DatastoreMO targetDsMo = new DatastoreMO(srcHyperHost.getContext(), morDs);
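// For datastore-cluster volumes, the backing file may have been renamed or moved (e.g. by Storage DRS); refresh volumePath from the VM's current disk chain.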
if (cmd.getContextParam(DiskTO.PROTOCOL_TYPE) != null && cmd.getContextParam(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase("DatastoreCluster")) {
VmwareContext context = getServiceContext();
VmwareHypervisorHost hyperHost = getHyperHost(context);
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDiskWithVolumeDetails(diskInfoBuilder, volumePath, chainInfo, false, null, poolTo.getUuid(), hyperHost, context);
if (diskInfoBuilder != null && matchingExistingDisk != null) {
String[] diskChain = matchingExistingDisk.getDiskChain();
DatastoreFile file = new DatastoreFile(diskChain[0]);
if (!file.getFileBaseName().equalsIgnoreCase(volumePath)) {
if (s_logger.isInfoEnabled())
s_logger.info("Detected disk-chain top file change on volume: " + volumePath + " -> " + file.getFileBaseName());
volumePath = file.getFileBaseName();
}
}
}
String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + VMDK_EXTENSION);
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, VMDK_EXTENSION));
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
}
int diskId = diskInfo.first().getKey();
diskLocator = new VirtualMachineRelocateSpecDiskLocator();
diskLocator.setDatastore(morDs);
diskLocator.setDiskId(diskId);
diskLocators.add(diskLocator);
if (cmd.getVolumeType() == Volume.Type.ROOT) {
relocateSpec.setDatastore(morDs);
// If a target datastore is provided for the VM, then by default all volumes associated with the VM will be migrated to that target datastore.
// Hence set the existing datastore as target datastore for volumes that are not to be migrated.
List<Pair<Integer, ManagedObjectReference>> diskDatastores = vmMo.getAllDiskDatastores();
for (Pair<Integer, ManagedObjectReference> diskDatastore : diskDatastores) {
if (diskDatastore.first().intValue() != diskId) {
diskLocator = new VirtualMachineRelocateSpecDiskLocator();
diskLocator.setDiskId(diskDatastore.first().intValue());
diskLocator.setDatastore(diskDatastore.second());
diskLocators.add(diskLocator);
}
}
}
relocateSpec.getDisk().addAll(diskLocators);
// Change datastore
if (!vmMo.changeDatastore(relocateSpec)) {
throw new Exception("Change datastore operation failed during volume migration");
} else {
s_logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName);
}
// Consolidate VM disks; otherwise further volume operations on the ROOT volume, such as volume snapshots, will result in DB inconsistencies.
if (!vmMo.consolidateVmDisks()) {
s_logger.warn("VM disk consolidation failed after storage migration.");
} else {
s_logger.debug("Successfully consolidated disks of VM " + vmName + ".");
}
// Update and return volume path and chain info because that could have changed after migration
if (!targetDsMo.fileExists(fullVolumePath)) {
VirtualDisk[] disks = vmMo.getAllDiskDevice();
for (VirtualDisk disk : disks) if (disk.getKey() == diskId) {
volumePath = vmMo.getVmdkFileBaseName(disk);
}
}
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
chainInfo = _gson.toJson(diskInfoBuilder.getDiskInfoByBackingFileBaseName(volumePath, targetDsMo.getName()));
MigrateVolumeAnswer answer = new MigrateVolumeAnswer(cmd, true, null, volumePath);
answer.setVolumeChainInfo(chainInfo);
return answer;
} catch (Exception e) {
String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString();
s_logger.error(msg, e);
return new MigrateVolumeAnswer(cmd, false, msg, null);
}
}