use of com.cloud.hypervisor.vmware.mo.DatastoreMO in project cloudstack by apache.
the class VmwareResource method getMatchingExistingDisk.
private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
if (diskInfoBuilder != null) {
VolumeObjectTO volume = (VolumeObjectTO) vol.getData();
String dsName = null;
String diskBackingFileBaseName = null;
Map<String, String> details = vol.getDetails();
boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
if (isManaged) {
String iScsiName = details.get(DiskTO.IQN);
// if the storage is managed, iScsiName should not be null
dsName = VmwareResource.getDatastoreName(iScsiName);
diskBackingFileBaseName = new DatastoreFile(volume.getPath()).getFileBaseName();
} else {
ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, volume.getDataStore().getUuid());
DatastoreMO dsMo = new DatastoreMO(context, morDs);
dsName = dsMo.getName();
diskBackingFileBaseName = volume.getPath();
}
VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName);
if (diskInfo != null) {
s_logger.info("Found existing disk info from volume path: " + volume.getPath());
return diskInfo;
} else {
String chainInfo = volume.getChainInfo();
if (chainInfo != null) {
VirtualMachineDiskInfo infoInChain = _gson.fromJson(chainInfo, VirtualMachineDiskInfo.class);
if (infoInChain != null) {
String[] disks = infoInChain.getDiskChain();
if (disks.length > 0) {
for (String diskPath : disks) {
DatastoreFile file = new DatastoreFile(diskPath);
diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName);
if (diskInfo != null) {
s_logger.info("Found existing disk from chain info: " + diskPath);
return diskInfo;
}
}
}
if (diskInfo == null) {
diskInfo = diskInfoBuilder.getDiskInfoByDeviceBusName(infoInChain.getDiskDeviceBusName());
if (diskInfo != null) {
s_logger.info("Found existing disk from from chain device bus information: " + infoInChain.getDiskDeviceBusName());
return diskInfo;
}
}
}
}
}
}
return null;
}
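The matching above relies on DatastoreFile decomposing a full datastore path into its file base name before querying the disk info builder. A minimal sketch of that lookup, using only the calls shown in the snippet (the method name and the example path in the comment are hypothetical):
private VirtualMachineDiskInfo findDiskInfoForPath(VirtualMachineDiskInfoBuilder diskInfoBuilder, String diskPath, String dsName) {
    // e.g. a path like "[somedatastore] i-2-3-VM/ROOT-3.vmdk" is reduced to the base name "ROOT-3" (hypothetical values)
    DatastoreFile file = new DatastoreFile(diskPath);
    return diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName);
}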
use of com.cloud.hypervisor.vmware.mo.DatastoreMO in project cloudstack by apache.
the class VmwareResource method execute.
protected StartAnswer execute(StartCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource StartCommand: " + _gson.toJson(cmd));
}
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
boolean vmAlreadyExistsInVcenter = false;
String existingVmName = null;
VirtualMachineFileInfo existingVmFileInfo = null;
VirtualMachineFileLayoutEx existingVmFileLayout = null;
Pair<String, String> names = composeVmNames(vmSpec);
String vmInternalCSName = names.first();
String vmNameOnVcenter = names.second();
String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER);
String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER);
DiskTO rootDiskTO = null;
// This helps avoid a mix of different SCSI subtype controllers in the instance.
if (DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
dataDiskController = DiskControllerType.scsi.toString();
}
// Validate the controller types
dataDiskController = DiskControllerType.getType(dataDiskController).toString();
rootDiskController = DiskControllerType.getType(rootDiskController).toString();
if (DiskControllerType.getType(rootDiskController) == DiskControllerType.none) {
throw new CloudRuntimeException("Invalid root disk controller detected : " + rootDiskController);
}
if (DiskControllerType.getType(dataDiskController) == DiskControllerType.none) {
throw new CloudRuntimeException("Invalid data disk controller detected : " + dataDiskController);
}
Pair<String, String> controllerInfo = new Pair<String, String>(rootDiskController, dataDiskController);
Boolean systemVm = vmSpec.getType().isUsedBySystem();
// vmInternalCSName always holds i-x-y, the CloudStack-generated internal VM name.
VmwareContext context = getServiceContext();
DatacenterMO dcMo = null;
try {
VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
VmwareHypervisorHost hyperHost = getHyperHost(context);
dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
// Validate VM name is unique in Datacenter
VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName);
if (vmInVcenter != null) {
vmAlreadyExistsInVcenter = true;
String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter.";
s_logger.error(msg);
throw new Exception(msg);
}
String guestOsId = translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs(), vmSpec.getPlatformEmulator()).value();
DiskTO[] disks = validateDisks(vmSpec.getDisks());
assert (disks.length > 0);
NicTO[] nics = vmSpec.getNics();
HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> dataStoresDetails = inferDatastoreDetailsFromDiskInfo(hyperHost, context, disks, cmd);
if ((dataStoresDetails == null) || (dataStoresDetails.isEmpty())) {
String msg = "Unable to locate datastore details of the volumes to be attached";
s_logger.error(msg);
throw new Exception(msg);
}
DatastoreMO dsRootVolumeIsOn = getDatastoreThatRootDiskIsOn(dataStoresDetails, disks);
if (dsRootVolumeIsOn == null) {
String msg = "Unable to locate datastore details of root volume";
s_logger.error(msg);
throw new Exception(msg);
}
VirtualMachineDiskInfoBuilder diskInfoBuilder = null;
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
DiskControllerType systemVmScsiControllerType = DiskControllerType.lsilogic;
int firstScsiControllerBusNum = 0;
int numScsiControllerForSystemVm = 1;
boolean hasSnapshot = false;
if (vmMo != null) {
s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration");
if (getVmPowerState(vmMo) != PowerState.PowerOff)
vmMo.safePowerOff(_shutdownWaitMs);
// retrieve disk information before we tear down
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
if (!hasSnapshot)
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
else
vmMo.tearDownDevices(new Class<?>[] { VirtualEthernetCard.class });
if (systemVm) {
ensureScsiDiskControllers(vmMo, systemVmScsiControllerType.toString(), numScsiControllerForSystemVm, firstScsiControllerBusNum);
} else {
ensureDiskControllers(vmMo, controllerInfo);
}
} else {
ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter();
assert (morDc != null);
vmMo = hyperHost.findVmOnPeerHyperHost(vmInternalCSName);
if (vmMo != null) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Found vm " + vmInternalCSName + " at other host, relocate to " + hyperHost.getHyperHostName());
}
takeVmFromOtherHyperHost(hyperHost, vmInternalCSName);
if (getVmPowerState(vmMo) != PowerState.PowerOff)
vmMo.safePowerOff(_shutdownWaitMs);
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
if (!hasSnapshot)
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
else
vmMo.tearDownDevices(new Class<?>[] { VirtualEthernetCard.class });
if (systemVm) {
// System VMs don't require more than 1 SCSI controller as they have no data volumes.
ensureScsiDiskControllers(vmMo, systemVmScsiControllerType.toString(), numScsiControllerForSystemVm, firstScsiControllerBusNum);
} else {
ensureDiskControllers(vmMo, controllerInfo);
}
} else {
// If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration).
VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName);
if (existingVmInDc != null) {
s_logger.debug("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the exisitng VM.");
existingVmName = existingVmInDc.getName();
existingVmFileInfo = existingVmInDc.getFileInfo();
existingVmFileLayout = existingVmInDc.getFileLayout();
existingVmInDc.unregisterVm();
}
Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails = null;
for (DiskTO vol : disks) {
if (vol.getType() == Volume.Type.ROOT) {
Map<String, String> details = vol.getDetails();
boolean managed = false;
if (details != null) {
managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
}
if (managed) {
String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN));
rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName);
} else {
DataStoreTO primaryStore = vol.getData().getDataStore();
rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid());
}
}
}
assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null);
boolean vmFolderExists = rootDiskDataStoreDetails.second().folderExists(String.format("[%s]", rootDiskDataStoreDetails.second().getName()), vmNameOnVcenter);
String vmxFileFullPath = dsRootVolumeIsOn.searchFileInSubFolders(vmNameOnVcenter + ".vmx", false);
if (vmFolderExists && vmxFileFullPath != null) {
// VM can be registered only if .vmx is present.
registerVm(vmNameOnVcenter, dsRootVolumeIsOn);
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found registered vm " + vmInternalCSName + " at host " + hyperHost.getHyperHostName());
}
}
tearDownVm(vmMo);
} else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int) (vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) {
throw new Exception("Failed to create VM. vmName: " + vmInternalCSName);
}
}
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo == null) {
throw new Exception("Failed to find the newly create or relocated VM. vmName: " + vmInternalCSName);
}
}
int totalChangeDevices = disks.length + nics.length;
DiskTO volIso = null;
if (vmSpec.getType() != VirtualMachine.Type.User) {
// system VM needs a patch ISO
totalChangeDevices++;
} else {
volIso = getIsoDiskTO(disks);
if (volIso == null)
totalChangeDevices++;
}
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), (int) (vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse());
// Check for multi-cores per socket settings
int numCoresPerSocket = 1;
String coresPerSocket = vmSpec.getDetails().get("cpu.corespersocket");
if (coresPerSocket != null) {
String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext());
// Property 'numCoresPerSocket' is supported since vSphere API 5.0
if (apiVersion.compareTo("5.0") >= 0) {
numCoresPerSocket = NumbersUtil.parseInt(coresPerSocket, 1);
vmConfigSpec.setNumCoresPerSocket(numCoresPerSocket);
}
}
// Check for hotadd settings
vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId));
String hostApiVersion = ((HostMO) hyperHost).getHostAboutInfo().getApiVersion();
if (numCoresPerSocket > 1 && hostApiVersion.compareTo("5.0") < 0) {
s_logger.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. Hence CpuHotAdd will not be" + " enabled for Virtual Machine: " + vmInternalCSName);
vmConfigSpec.setCpuHotAddEnabled(false);
} else {
vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId));
}
configNestedHVSupport(vmMo, vmSpec, vmConfigSpec);
VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices];
int i = 0;
int ideUnitNumber = 0;
int scsiUnitNumber = 0;
int nicUnitNumber = 0;
int ideControllerKey = vmMo.getIDEDeviceControllerKey();
int scsiControllerKey = vmMo.getGenericScsiDeviceControllerKeyNoException();
int controllerKey;
// prepare systemvm patch ISO
if (vmSpec.getType() != VirtualMachine.Type.User) {
// attach ISO (for patching of system VM)
Pair<String, Long> secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId));
String secStoreUrl = secStoreUrlAndId.first();
Long secStoreId = secStoreUrlAndId.second();
if (secStoreUrl == null) {
String msg = "secondary storage for dc " + _dcId + " is not ready yet?";
throw new Exception(msg);
}
mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId);
ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl);
if (morSecDs == null) {
String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
throw new Exception(msg);
}
DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs);
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, ideUnitNumber++, i + 1);
deviceConfigSpecArray[i].setDevice(isoInfo.first());
if (isoInfo.second()) {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
} else {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
}
} else {
// Note: we will always plug a CDROM device
if (volIso != null) {
TemplateObjectTO iso = (TemplateObjectTO) volIso.getData();
if (iso.getPath() != null && !iso.getPath().isEmpty()) {
DataStoreTO imageStore = iso.getDataStore();
if (!(imageStore instanceof NfsTO)) {
s_logger.debug("unsupported protocol");
throw new Exception("unsupported protocol");
}
NfsTO nfsImageStore = (NfsTO) imageStore;
String isoPath = nfsImageStore.getUrl() + File.separator + iso.getPath();
Pair<String, ManagedObjectReference> isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, isoPath);
assert (isoDatastoreInfo != null);
assert (isoDatastoreInfo.second() != null);
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1);
deviceConfigSpecArray[i].setDevice(isoInfo.first());
if (isoInfo.second()) {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
} else {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
}
}
} else {
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, i + 1);
deviceConfigSpecArray[i].setDevice(isoInfo.first());
if (isoInfo.second()) {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
} else {
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
}
}
}
i++;
//
// Setup ROOT/DATA disk devices
//
DiskTO[] sortedDisks = sortVolumesByDeviceId(disks);
for (DiskTO vol : sortedDisks) {
if (vol.getType() == Volume.Type.ISO)
continue;
VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context);
controllerKey = getDiskController(matchingExistingDisk, vol, vmSpec, ideControllerKey, scsiControllerKey);
String diskController = getDiskController(vmMo, matchingExistingDisk, vol, new Pair<String, String>(rootDiskController, dataDiskController));
if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) {
diskController = vmMo.getRecommendedDiskController(null);
}
if (DiskControllerType.getType(diskController) == DiskControllerType.ide) {
controllerKey = vmMo.getIDEControllerKey(ideUnitNumber);
if (vol.getType() == Volume.Type.DATADISK) {
// Ensure a maximum of 2 data volumes over the IDE controller, 3 including the root volume
if (vmMo.getNumberOfVirtualDisks() > 3) {
throw new CloudRuntimeException("Found more than 3 virtual disks attached to this VM [" + vmMo.getVmName() + "]. Unable to implement the disks over " + diskController + " controller, as maximum number of devices supported over IDE controller is 4 includeing CDROM device.");
}
}
} else {
controllerKey = vmMo.getScsiDiskControllerKeyNoException(diskController);
if (controllerKey == -1) {
// This may happen for the ROOT disk of legacy VMs which don't have a recommended disk controller when the global configuration parameter 'vmware.root.disk.controller' is set to "osdefault"
// Retrieve the existing controller and use it.
Ternary<Integer, Integer, DiskControllerType> vmScsiControllerInfo = vmMo.getScsiControllerInfo();
DiskControllerType existingControllerType = vmScsiControllerInfo.third();
controllerKey = vmMo.getScsiDiskControllerKeyNoException(existingControllerType.toString());
}
}
if (!hasSnapshot) {
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData();
DataStoreTO primaryStore = volumeTO.getDataStore();
Map<String, String> details = vol.getDetails();
boolean managed = false;
String iScsiName = null;
if (details != null) {
managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
iScsiName = details.get(DiskTO.IQN);
}
// if the storage is managed, iScsiName should not be null
String datastoreName = managed ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid();
Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(datastoreName);
assert (volumeDsDetails != null);
String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, vol, matchingExistingDisk, dataStoresDetails);
if (controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
scsiUnitNumber++;
VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) ? ((ideUnitNumber++) % VmwareHelper.MAX_IDE_CONTROLLER_COUNT) : scsiUnitNumber++, i + 1);
if (vol.getType() == Volume.Type.ROOT)
rootDiskTO = vol;
deviceConfigSpecArray[i].setDevice(device);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare volume at new device " + _gson.toJson(device));
i++;
} else {
if (controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
scsiUnitNumber++;
if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber))
ideUnitNumber++;
else
scsiUnitNumber++;
}
}
//
if (guestOsId.startsWith("darwin")) {
//Mac OS
VirtualDevice[] devices = vmMo.getMatchedDevices(new Class<?>[] { VirtualUSBController.class });
if (devices.length == 0) {
s_logger.debug("No USB Controller device on VM Start. Add USB Controller device for Mac OS VM " + vmInternalCSName);
//For Mac OS X systems, the EHCI+UHCI controller is enabled by default and is required for USB mouse and keyboard access.
VirtualDevice usbControllerDevice = VmwareHelper.prepareUSBControllerDevice();
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
deviceConfigSpecArray[i].setDevice(usbControllerDevice);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare USB controller at new device " + _gson.toJson(deviceConfigSpecArray[i]));
i++;
} else {
s_logger.debug("USB Controller device exists on VM Start for Mac OS VM " + vmInternalCSName);
}
}
//
// Setup NIC devices
//
VirtualDevice nic;
int nicMask = 0;
int nicCount = 0;
VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.valueOf(vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER));
if (s_logger.isDebugEnabled())
s_logger.debug("VM " + vmInternalCSName + " will be started with NIC device type: " + nicDeviceType);
NiciraNvpApiVersion.logNiciraApiVersion();
Map<String, String> nicUuidToDvSwitchUuid = new HashMap<String, String>();
for (NicTO nicTo : sortNicsByDeviceId(nics)) {
s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo));
boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus"));
VirtualMachine.Type vmType = cmd.getVirtualMachine().getType();
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType);
if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) || (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) {
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
String dvSwitchUuid;
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor);
ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first());
dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor);
s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid);
nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), nicUnitNumber++, i + 1, true, true);
if (nicTo.getUuid() != null) {
nicUuidToDvSwitchUuid.put(nicTo.getUuid(), dvSwitchUuid);
}
} else {
s_logger.info("Preparing NIC device on network " + networkInfo.second());
nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), nicUnitNumber++, i + 1, true, true);
}
} else {
// if NSX API version >= 4.2, connect to br-int (nsx.network) and do not create a portgroup; otherwise keep the previous behaviour
nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(), nicTo.getMac(), nicUnitNumber++, i + 1, true, true);
}
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
deviceConfigSpecArray[i].setDevice(nic);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
if (s_logger.isDebugEnabled())
s_logger.debug("Prepare NIC at new device " + _gson.toJson(deviceConfigSpecArray[i]));
// this is really a hack for DomR; upon DomR startup, we will reset all the NIC allocation after eth3
if (nicCount < 3)
nicMask |= (1 << nicCount);
i++;
nicCount++;
}
for (int j = 0; j < i; j++) vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]);
//
// Setup VM options
//
// pass boot arguments through machine.id & perform customized options to VMX
ArrayList<OptionValue> extraOptions = new ArrayList<OptionValue>();
configBasicExtraOption(extraOptions, vmSpec);
configNvpExtraOption(extraOptions, vmSpec, nicUuidToDvSwitchUuid);
configCustomExtraOption(extraOptions, vmSpec);
// config VNC
String keyboardLayout = null;
if (vmSpec.getDetails() != null)
keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD);
vmConfigSpec.getExtraConfig().addAll(Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout)));
// config video card
configureVideoCard(vmMo, vmSpec, vmConfigSpec);
//
if (!vmMo.configureVm(vmConfigSpec)) {
throw new Exception("Failed to configure VM before start. vmName: " + vmInternalCSName);
}
if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) {
hyperHost.setRestartPriorityForVM(vmMo, DasVmPriority.HIGH.value());
}
//For resizing root disk.
if (rootDiskTO != null && !hasSnapshot) {
resizeRootDisk(vmMo, rootDiskTO, hyperHost, context);
}
//
// Post Configuration
//
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask));
postNvpConfigBeforeStart(vmMo, vmSpec);
Map<String, String> iqnToPath = new HashMap<String, String>();
postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToPath, hyperHost, context);
//
if (!vmMo.powerOn()) {
throw new Exception("Failed to start VM. vmName: " + vmInternalCSName + " with hostname " + vmNameOnVcenter);
}
StartAnswer startAnswer = new StartAnswer(cmd);
startAnswer.setIqnToPath(iqnToPath);
// Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it.
if (existingVmName != null && existingVmFileLayout != null) {
deleteUnregisteredVmFiles(existingVmFileLayout, dcMo, true);
}
return startAnswer;
} catch (Throwable e) {
if (e instanceof RemoteException) {
s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context");
invalidateServiceContext();
}
String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e);
s_logger.warn(msg, e);
StartAnswer startAnswer = new StartAnswer(cmd, msg);
if (vmAlreadyExistsInVcenter) {
startAnswer.setContextParam("stopRetry", "true");
}
// Since VM start failed, if there was an existing VM in a different cluster that was unregistered, register it back.
if (existingVmName != null && existingVmFileInfo != null) {
s_logger.debug("Since VM start failed, registering back an existing VM: " + existingVmName + " that was unregistered");
try {
DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName());
DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
registerVm(existingVmName, existingVmDsMo);
} catch (Exception ex) {
String message = "Failed to register an existing VM: " + existingVmName + " due to " + VmwareHelper.getExceptionMessage(ex);
s_logger.warn(message, ex);
}
}
return startAnswer;
} finally {
}
}
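A recurring step in the StartCommand flow above is mapping a DiskTO to the datastore it lives on: managed (iSCSI) volumes derive the datastore name from the IQN, while other volumes are keyed by the primary store UUID. A minimal sketch of that resolution, built only from the calls shown above (the helper name is hypothetical):
private String resolveDatastoreName(DiskTO vol) {
    Map<String, String> details = vol.getDetails();
    boolean managed = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
    if (managed) {
        // managed storage: the datastore name is derived from the iSCSI IQN
        return VmwareResource.getDatastoreName(details.get(DiskTO.IQN));
    }
    // non-managed storage: the primary store UUID identifies the datastore
    return vol.getData().getDataStore().getUuid();
}
The returned name is then used as the key into the dataStoresDetails map, as in the loops over disks above.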
use of com.cloud.hypervisor.vmware.mo.DatastoreMO in project cloudstack by apache.
the class VmwareResource method execute.
private Answer execute(MigrateVolumeCommand cmd) {
String volumePath = cmd.getVolumePath();
StorageFilerTO poolTo = cmd.getPool();
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd));
}
String vmName = cmd.getAttachedVmName();
VirtualMachineMO vmMo = null;
VmwareHypervisorHost srcHyperHost = null;
ManagedObjectReference morDs = null;
ManagedObjectReference morDc = null;
VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<VirtualMachineRelocateSpecDiskLocator>();
VirtualMachineRelocateSpecDiskLocator diskLocator = null;
String tgtDsName = "";
try {
srcHyperHost = getHyperHost(getServiceContext());
morDc = srcHyperHost.getHyperHostDatacenter();
tgtDsName = poolTo.getUuid();
// find the VM in this datacenter, not just in this cluster.
DatacenterMO dcMo = new DatacenterMO(getServiceContext(), morDc);
vmMo = dcMo.findVm(vmName);
if (vmMo == null) {
String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
s_logger.error(msg);
throw new Exception(msg);
}
vmName = vmMo.getName();
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
if (morDs == null) {
String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() + " to execute MigrateVolumeCommand";
s_logger.error(msg);
throw new Exception(msg);
}
DatastoreMO targetDsMo = new DatastoreMO(srcHyperHost.getContext(), morDs);
String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + ".vmdk");
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, volumePath + ".vmdk");
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
}
int diskId = diskInfo.first().getKey();
diskLocator = new VirtualMachineRelocateSpecDiskLocator();
diskLocator.setDatastore(morDs);
diskLocator.setDiskId(diskId);
diskLocators.add(diskLocator);
if (cmd.getVolumeType() == Volume.Type.ROOT) {
relocateSpec.setDatastore(morDs);
// If a target datastore is provided for the VM, then by default all volumes associated with the VM will be migrated to that target datastore.
// Hence set the existing datastore as target datastore for volumes that are not to be migrated.
List<Pair<Integer, ManagedObjectReference>> diskDatastores = vmMo.getAllDiskDatastores();
for (Pair<Integer, ManagedObjectReference> diskDatastore : diskDatastores) {
if (diskDatastore.first().intValue() != diskId) {
diskLocator = new VirtualMachineRelocateSpecDiskLocator();
diskLocator.setDiskId(diskDatastore.first().intValue());
diskLocator.setDatastore(diskDatastore.second());
diskLocators.add(diskLocator);
}
}
}
relocateSpec.getDisk().addAll(diskLocators);
// Change datastore
if (!vmMo.changeDatastore(relocateSpec)) {
throw new Exception("Change datastore operation failed during volume migration");
} else {
s_logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName);
}
// Consolidate VM disks; otherwise further volume operations on the ROOT volume, such as volume snapshots, will result in DB inconsistencies.
if (!vmMo.consolidateVmDisks()) {
s_logger.warn("VM disk consolidation failed after storage migration.");
} else {
s_logger.debug("Successfully consolidated disks of VM " + vmName + ".");
}
// Update and return volume path and chain info because that could have changed after migration
if (!targetDsMo.fileExists(fullVolumePath)) {
VirtualDisk[] disks = vmMo.getAllDiskDevice();
for (VirtualDisk disk : disks) if (disk.getKey() == diskId) {
volumePath = vmMo.getVmdkFileBaseName(disk);
}
}
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
String chainInfo = _gson.toJson(diskInfoBuilder.getDiskInfoByBackingFileBaseName(volumePath, poolTo.getUuid().replace("-", "")));
MigrateVolumeAnswer answer = new MigrateVolumeAnswer(cmd, true, null, volumePath);
answer.setVolumeChainInfo(chainInfo);
return answer;
} catch (Exception e) {
String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString();
s_logger.error(msg, e);
return new MigrateVolumeAnswer(cmd, false, msg, null);
}
}
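The migration above first resolves the target pool UUID to a DatastoreMO via HypervisorHostHelper before building the relocate spec. That lookup-and-wrap pattern can be isolated as a small sketch, assuming the same error-handling style as the snippet (the method name is hypothetical):
private DatastoreMO getTargetDatastore(VmwareHypervisorHost hyperHost, String poolUuid) throws Exception {
    ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid);
    if (morDs == null) {
        throw new Exception("Unable to find the mounted datastore with name: " + poolUuid);
    }
    // wrap the managed object reference so datastore operations (getName, fileExists, ...) can be issued
    return new DatastoreMO(hyperHost.getContext(), morDs);
}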
use of com.cloud.hypervisor.vmware.mo.DatastoreMO in project cloudstack by apache.
the class VmwareResource method execute.
protected Answer execute(GetStorageStatsCommand cmd) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Executing resource GetStorageStatsCommand: " + _gson.toJson(cmd));
}
try {
VmwareContext context = getServiceContext();
VmwareHypervisorHost hyperHost = getHyperHost(context);
ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getStorageId());
if (morDs != null) {
DatastoreMO datastoreMo = new DatastoreMO(context, morDs);
DatastoreSummary summary = datastoreMo.getSummary();
assert (summary != null);
long capacity = summary.getCapacity();
long free = summary.getFreeSpace();
long used = capacity - free;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() + ", capacity: " + capacity + ", free: " + free + ", used: " + used);
}
if (summary.getCapacity() <= 0) {
s_logger.warn("Something is wrong with vSphere NFS datastore, rebooting ESX(ESXi) host should help");
}
return new GetStorageStatsAnswer(cmd, capacity, used);
} else {
String msg = "Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype();
s_logger.error(msg);
return new GetStorageStatsAnswer(cmd, msg);
}
} catch (Throwable e) {
if (e instanceof RemoteException) {
s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context");
invalidateServiceContext();
}
String msg = "Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() + ") due to " + VmwareHelper.getExceptionMessage(e);
s_logger.error(msg, e);
return new GetStorageStatsAnswer(cmd, msg);
}
}
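The stats computation is simply capacity minus free space read from the DatastoreSummary. As a minimal sketch built from the calls above (the helper name is hypothetical):
private GetStorageStatsAnswer buildStorageStats(GetStorageStatsCommand cmd, DatastoreMO datastoreMo) throws Exception {
    DatastoreSummary summary = datastoreMo.getSummary();
    long capacity = summary.getCapacity();
    // used space is derived rather than reported directly
    long used = capacity - summary.getFreeSpace();
    return new GetStorageStatsAnswer(cmd, capacity, used);
}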
use of com.cloud.hypervisor.vmware.mo.DatastoreMO in project cloudstack by apache.
the class VmwareStorageManagerImpl method execute.
@Override
@Deprecated
public Answer execute(VmwareHostService hostService, BackupSnapshotCommand cmd) {
Long accountId = cmd.getAccountId();
Long volumeId = cmd.getVolumeId();
String secondaryStorageUrl = cmd.getSecondaryStorageUrl();
// precondition: secondaryStorageUrl is expected to be non-null
String snapshotUuid = cmd.getSnapshotUuid();
String prevSnapshotUuid = cmd.getPrevSnapshotUuid();
String prevBackupUuid = cmd.getPrevBackupUuid();
VirtualMachineMO workerVm = null;
String workerVMName = null;
String volumePath = cmd.getVolumePath();
ManagedObjectReference morDs = null;
DatastoreMO dsMo = null;
// By default assume failure
String details = null;
boolean success = false;
String snapshotBackupUuid = null;
VmwareContext context = hostService.getServiceContext(cmd);
VirtualMachineMO vmMo = null;
try {
VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd);
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPool().getUuid());
try {
vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName());
if (vmMo == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter");
}
vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName());
if (vmMo == null) {
dsMo = new DatastoreMO(hyperHost.getContext(), morDs);
workerVMName = hostService.getWorkerName(context, cmd, 0);
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName);
if (vmMo == null) {
throw new Exception("Failed to find the newly create or relocated VM. vmName: " + workerVMName);
}
workerVm = vmMo;
// attach volume to worker VM
String datastoreVolumePath = getVolumePathInDatastore(dsMo, volumePath + ".vmdk");
vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs);
}
}
if (!vmMo.createSnapshot(snapshotUuid, "Snapshot taken for " + cmd.getSnapshotName(), false, false)) {
throw new Exception("Failed to take snapshot " + cmd.getSnapshotName() + " on vm: " + cmd.getVmName());
}
snapshotBackupUuid = backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid, hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion());
success = (snapshotBackupUuid != null);
if (success) {
details = "Successfully backedUp the snapshotUuid: " + snapshotUuid + " to secondary storage.";
}
} finally {
if (vmMo != null) {
ManagedObjectReference snapshotMor = vmMo.getSnapshotMor(snapshotUuid);
if (snapshotMor != null) {
vmMo.removeSnapshot(snapshotUuid, false);
}
}
try {
if (workerVm != null) {
// detach volume and destroy worker vm
workerVm.detachAllDisks();
workerVm.destroy();
}
} catch (Throwable e) {
s_logger.warn("Failed to destroy worker VM: " + workerVMName);
}
}
} catch (Throwable e) {
if (e instanceof RemoteException) {
hostService.invalidateServiceContext(context);
}
s_logger.error("Unexpecpted exception ", e);
details = "BackupSnapshotCommand exception: " + StringUtils.getExceptionStackInfo(e);
return new BackupSnapshotAnswer(cmd, false, details, snapshotBackupUuid, true);
}
return new BackupSnapshotAnswer(cmd, success, details, snapshotBackupUuid, true);
}
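When no owner VM can be found, the backup path above falls back to a throwaway worker VM that the volume is attached to for the duration of the snapshot. A minimal sketch of that worker setup, using only the calls from the snippet (the method name is hypothetical; the caller remains responsible for detachAllDisks() and destroy() in a finally block, as above):
private VirtualMachineMO attachVolumeToWorkerVm(VmwareHypervisorHost hyperHost, DatastoreMO dsMo, ManagedObjectReference morDs,
        String workerVMName, String datastoreVolumePath) throws Exception {
    VirtualMachineMO workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName);
    if (workerVm == null) {
        throw new Exception("Failed to create worker VM: " + workerVMName);
    }
    // attach the target volume to the worker so a snapshot can be taken against it
    workerVm.attachDisk(new String[] { datastoreVolumePath }, morDs);
    return workerVm;
}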