Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
From the class VmwareResource, method execute.
protected Answer execute(PrepareForMigrationCommand cmd) {
    VirtualMachineTO vm = cmd.getVirtualMachine();
    final String vmName = vm.getName();
    try {
        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
        VmwareManager mgr = hyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
        // find VM through datacenter (VM is not at the target host yet)
        VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
        if (vmMo == null) {
            s_logger.info("VM " + vmName + " was not found in the cluster of host " + hyperHost.getHyperHostName() + ". Looking for the VM in datacenter.");
            ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
            DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), dcMor);
            vmMo = dcMo.findVm(vmName);
            if (vmMo == null) {
                String msg = "VM " + vmName + " does not exist in VMware datacenter";
                s_logger.error(msg);
                throw new Exception(msg);
            }
        }
        NicTO[] nics = vm.getNics();
        for (NicTO nic : nics) {
            // prepare network on the host
            prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false, cmd.getVirtualMachine().getType());
        }
        List<Pair<String, Long>> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(_dcId));
        for (Pair<String, Long> secStoreUrlAndId : secStoreUrlAndIdList) {
            String secStoreUrl = secStoreUrlAndId.first();
            Long secStoreId = secStoreUrlAndId.second();
            if (secStoreUrl == null) {
                String msg = String.format("Secondary storage for dc %s is not ready yet?", _dcId);
                throw new Exception(msg);
            }
            if (vm.getType() != VirtualMachine.Type.User) {
                mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId);
            }
            ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl);
            if (morSecDs == null) {
                String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
                throw new Exception(msg);
            }
        }
        return new PrepareForMigrationAnswer(cmd);
    } catch (Throwable e) {
        return new PrepareForMigrationAnswer(cmd, createLogMessageException(e, cmd));
    }
}
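A minimal sketch of the cluster-then-datacenter lookup used at the top of this method, pulled into a reusable helper. The helper name findVmInClusterOrDatacenter is hypothetical; only the VmwareHypervisorHost and DatacenterMO calls already shown above are assumed.

private VirtualMachineMO findVmInClusterOrDatacenter(VmwareHypervisorHost hyperHost, String vmName) throws Exception {
    // Look in the current cluster first; during migration preparation the VM
    // is typically not on the target host yet, so fall back to a
    // datacenter-wide search before giving up.
    VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
    if (vmMo == null) {
        ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
        DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), dcMor);
        vmMo = dcMo.findVm(vmName);
    }
    return vmMo;
}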
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
From the class VmwareResource, method execute.
protected Answer execute(GetVmIpAddressCommand cmd) {
    String details = "Unable to find IP Address of VM. ";
    String vmName = cmd.getVmName();
    boolean result = false;
    String ip = null;
    Answer answer = null;
    VmwareContext context = getServiceContext();
    VmwareHypervisorHost hyperHost = getHyperHost(context);
    if (vmName == null || vmName.isEmpty()) {
        details += "Name of instance provided is NULL or empty.";
        return new Answer(cmd, result, details);
    }
    try {
        VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
        if (vmMo != null) {
            GuestInfo guestInfo = vmMo.getGuestInfo();
            VirtualMachineToolsStatus toolsStatus = guestInfo.getToolsStatus();
            if (toolsStatus == VirtualMachineToolsStatus.TOOLS_NOT_INSTALLED) {
                details += "Vmware tools not installed.";
            } else {
                ip = guestInfo.getIpAddress();
                if (ip != null) {
                    result = true;
                }
                details = ip;
            }
        } else {
            details += "VM " + vmName + " no longer exists on vSphere host: " + hyperHost.getHyperHostName();
            s_logger.info(details);
        }
    } catch (Throwable e) {
        createLogMessageException(e, cmd);
        details = String.format("%s. Encountered exception: [%s].", details, VmwareHelper.getExceptionMessage(e));
        s_logger.error(details);
    }
    answer = new Answer(cmd, result, details);
    if (s_logger.isTraceEnabled()) {
        s_logger.trace("Returning GetVmIpAddressAnswer: " + _gson.toJson(answer));
    }
    return answer;
}
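Note that on success the guest IP is returned in the Answer's details field rather than through a dedicated getter. A minimal caller-side sketch of reading it back, assuming only the getResult() and getDetails() accessors of Answer; the helper name guestIpFromAnswer is hypothetical.

private static String guestIpFromAnswer(Answer answer) {
    // result == true means details carries the IP address string;
    // otherwise details carries the failure description built above.
    if (answer != null && answer.getResult()) {
        return answer.getDetails();
    }
    return null;
}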
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
From the class VmwareResource, method fillHostHardwareInfo.
private void fillHostHardwareInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFaultFaultMsg, RemoteException, Exception {
    VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
    VmwareHypervisorHostResourceSummary summary = hyperHost.getHyperHostResourceSummary();
    if (s_logger.isInfoEnabled()) {
        s_logger.info("Startup report on host hardware info. " + _gson.toJson(summary));
    }
    cmd.setCaps("hvm");
    cmd.setDom0MinMemory(0);
    cmd.setSpeed(summary.getCpuSpeed());
    cmd.setCpuSockets(summary.getCpuSockets());
    cmd.setCpus((int) summary.getCpuCount());
    cmd.setMemory(summary.getMemoryBytes());
}
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
From the class VmwareResource, method execute.
protected Answer execute(ModifyStoragePoolCommand cmd) {
    try {
        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
        StorageFilerTO pool = cmd.getPool();
        if (pool.getType() != StoragePoolType.NetworkFilesystem && pool.getType() != StoragePoolType.VMFS && pool.getType() != StoragePoolType.PreSetup && pool.getType() != StoragePoolType.DatastoreCluster) {
            throw new Exception("Unsupported storage pool type " + pool.getType());
        }
        ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid());
        if (morDatastore == null) {
            morDatastore = hyperHost.mountDatastore((pool.getType() == StoragePoolType.VMFS || pool.getType() == StoragePoolType.PreSetup || pool.getType() == StoragePoolType.DatastoreCluster), pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", ""), true);
        }
        assert (morDatastore != null);
        DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore);
        HypervisorHostHelper.createBaseFolder(dsMo, hyperHost, pool.getType());
        long capacity = 0;
        long available = 0;
        List<ModifyStoragePoolAnswer> childDatastoresModifyStoragePoolAnswers = new ArrayList<>();
        if (pool.getType() == StoragePoolType.DatastoreCluster) {
            StoragepodMO datastoreClusterMo = new StoragepodMO(getServiceContext(), morDatastore);
            StoragePodSummary dsClusterSummary = datastoreClusterMo.getDatastoreClusterSummary();
            capacity = dsClusterSummary.getCapacity();
            available = dsClusterSummary.getFreeSpace();
            List<ManagedObjectReference> childDatastoreMors = datastoreClusterMo.getDatastoresInDatastoreCluster();
            for (ManagedObjectReference childDsMor : childDatastoreMors) {
                DatastoreMO childDsMo = new DatastoreMO(getServiceContext(), childDsMor);
                Map<String, TemplateProp> tInfo = new HashMap<>();
                DatastoreSummary summary = childDsMo.getDatastoreSummary();
                ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, summary.getCapacity(), summary.getFreeSpace(), tInfo);
                StoragePoolInfo poolInfo = answer.getPoolInfo();
                poolInfo.setName(summary.getName());
                String datastoreClusterPath = pool.getPath();
                int pathstartPosition = datastoreClusterPath.lastIndexOf('/');
                String datacenterName = datastoreClusterPath.substring(0, pathstartPosition + 1);
                String childPath = datacenterName + summary.getName();
                poolInfo.setHostPath(childPath);
                String uuid = childDsMo.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID);
                if (uuid == null) {
                    uuid = UUID.nameUUIDFromBytes((pool.getHost() + childPath).getBytes()).toString();
                }
                poolInfo.setUuid(uuid);
                poolInfo.setLocalPath(cmd.LOCAL_PATH_PREFIX + File.separator + uuid);
                answer.setPoolInfo(poolInfo);
                answer.setPoolType(summary.getType());
                answer.setLocalDatastoreName(morDatastore.getValue());
                childDsMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, uuid);
                HypervisorHostHelper.createBaseFolderInDatastore(childDsMo, hyperHost.getHyperHostDatacenter());
                childDatastoresModifyStoragePoolAnswers.add(answer);
            }
        } else {
            HypervisorHostHelper.createBaseFolderInDatastore(dsMo, hyperHost.getHyperHostDatacenter());
            DatastoreSummary summary = dsMo.getDatastoreSummary();
            capacity = summary.getCapacity();
            available = summary.getFreeSpace();
        }
        Map<String, TemplateProp> tInfo = new HashMap<>();
        ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, capacity, available, tInfo);
        answer.setDatastoreClusterChildren(childDatastoresModifyStoragePoolAnswers);
        if (cmd.getAdd() && (pool.getType() == StoragePoolType.VMFS || pool.getType() == StoragePoolType.PreSetup) && pool.getType() != StoragePoolType.DatastoreCluster) {
            answer.setPoolType(dsMo.getDatastoreType());
            answer.setLocalDatastoreName(morDatastore.getValue());
        }
        return answer;
    } catch (Throwable e) {
        return new Answer(cmd, false, createLogMessageException(e, cmd));
    }
}
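When a child datastore of a datastore cluster has no CLOUD_UUID custom field yet, the snippet derives one deterministically from the pool host and the child path. A minimal standalone sketch of that derivation; the host and path literals are purely illustrative.

// Illustrative values; in the snippet they come from pool.getHost() and the
// child datastore summary name appended to the datastore cluster's path.
String host = "10.1.1.10";
String childPath = "/dc1/childDatastore1";
// Name-based (type 3) UUID over host + path: the same datastore always maps
// to the same UUID across runs, so re-adding the pool is idempotent.
String uuid = UUID.nameUUIDFromBytes((host + childPath).getBytes()).toString();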
Use of com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost in project cloudstack by apache.
From the class VmwareResource, method execute.
private Answer execute(ResizeVolumeCommand cmd) {
    String path = cmd.getPath();
    String vmName = cmd.getInstanceName();
    long newSize = cmd.getNewSize() / ResourceType.bytesToKiB;
    long oldSize = cmd.getCurrentSize() / ResourceType.bytesToKiB;
    boolean managed = cmd.isManaged();
    String poolUUID = cmd.getPoolUuid();
    String chainInfo = cmd.getChainInfo();
    boolean useWorkerVm = false;
    VmwareContext context = getServiceContext();
    VmwareHypervisorHost hyperHost = getHyperHost(context);
    VirtualMachineMO vmMo = null;
    String vmdkDataStorePath = null;
    try {
        if (newSize < oldSize) {
            String errorMsg = String.format("VMware doesn't support shrinking volume from larger size [%s] GB to a smaller size [%s] GB. Can't resize volume of VM [name: %s].", oldSize / Float.valueOf(ResourceType.bytesToMiB), newSize / Float.valueOf(ResourceType.bytesToMiB), vmName);
            s_logger.error(errorMsg);
            throw new Exception(errorMsg);
        } else if (newSize == oldSize) {
            return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB);
        }
        if (vmName.equalsIgnoreCase("none")) {
            // OfflineVmwareMigration: we need to refactor the worker vm creation out for use in migration methods as well as here
            // OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
            // we need to spawn a worker VM to attach the volume to and resize the volume.
            useWorkerVm = true;
            String poolId = cmd.getPoolUuid();
            // OfflineVmwareMigration: refactor for re-use
            // OfflineVmwareMigration: 1. find data(store)
            ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
            DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
            vmName = getWorkerName(getServiceContext(), cmd, 0, dsMo);
            s_logger.info("Create worker VM " + vmName);
            // OfflineVmwareMigration: 2. create the worker with access to the data(store)
            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName, null);
            if (vmMo == null) {
                // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
                throw new Exception("Unable to create a worker VM for volume resize");
            }
            synchronized (this) {
                // OfflineVmwareMigration: 3. attach the disk to the worker
                vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + VMDK_EXTENSION);
                vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS);
            }
        }
        // OfflineVmwareMigration: 4. find the (worker-) VM
        // find VM through datacenter (VM is not at the target host yet)
        vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
        if (vmMo == null) {
            String errorMsg = String.format("VM [name: %s] does not exist in VMware datacenter.", vmName);
            s_logger.error(errorMsg);
            throw new Exception(errorMsg);
        }
        if (managed) {
            ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
            ClusterMO clusterMO = new ClusterMO(context, morCluster);
            List<Pair<ManagedObjectReference, String>> lstHosts = clusterMO.getClusterHosts();
            Collections.shuffle(lstHosts, RANDOM);
            Pair<ManagedObjectReference, String> host = lstHosts.get(0);
            HostMO hostMO = new HostMO(context, host.first());
            HostDatastoreSystemMO hostDatastoreSystem = hostMO.getHostDatastoreSystemMO();
            String iScsiName = cmd.get_iScsiName();
            ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, VmwareResource.getDatastoreName(iScsiName));
            DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
            _storageProcessor.expandDatastore(hostDatastoreSystem, dsMo);
        }
        boolean volumePathChangeObserved = false;
        boolean datastoreChangeObserved = false;
        Pair<String, String> pathAndChainInfo = getNewPathAndChainInfoInDatastoreCluster(vmMo, path, chainInfo, managed, cmd.get_iScsiName(), poolUUID, cmd.getContextParam(DiskTO.PROTOCOL_TYPE));
        Pair<String, String> poolUUIDandChainInfo = getNewPoolUUIDAndChainInfoInDatastoreCluster(vmMo, path, chainInfo, managed, cmd.get_iScsiName(), poolUUID, cmd.getContextParam(DiskTO.PROTOCOL_TYPE));
        if (pathAndChainInfo != null) {
            volumePathChangeObserved = true;
            path = pathAndChainInfo.first();
            chainInfo = pathAndChainInfo.second();
        }
        if (poolUUIDandChainInfo != null) {
            datastoreChangeObserved = true;
            poolUUID = poolUUIDandChainInfo.first();
            chainInfo = poolUUIDandChainInfo.second();
        }
        // OfflineVmwareMigration: 5. ignore/replace the rest of the try-block; It is the functional bit
        VirtualDisk disk = getDiskAfterResizeDiskValidations(vmMo, path);
        String vmdkAbsFile = getAbsoluteVmdkFile(disk);
        if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
            vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
        }
        disk.setCapacityInKB(newSize);
        VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec();
        deviceConfigSpec.setDevice(disk);
        deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT);
        VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
        vmConfigSpec.getDeviceChange().add(deviceConfigSpec);
        if (!vmMo.configureVm(vmConfigSpec)) {
            throw new Exception(String.format("Failed to configure VM [name: %s] to resize disk.", vmName));
        }
        ResizeVolumeAnswer answer = new ResizeVolumeAnswer(cmd, true, "success", newSize * 1024);
        if (datastoreChangeObserved) {
            answer.setContextParam("datastoreUUID", poolUUID);
            answer.setContextParam("chainInfo", chainInfo);
        }
        if (volumePathChangeObserved) {
            answer.setContextParam("volumePath", path);
            answer.setContextParam("chainInfo", chainInfo);
        }
        return answer;
    } catch (Exception e) {
        String errorMsg = String.format("Failed to resize volume of VM [name: %s] due to: [%s].", vmName, e.getMessage());
        s_logger.error(errorMsg, e);
        return new ResizeVolumeAnswer(cmd, false, errorMsg);
    } finally {
        // OfflineVmwareMigration: 6. check if a worker was used and destroy it if needed
        try {
            if (useWorkerVm) {
                s_logger.info("Destroy worker VM after volume resize");
                vmMo.detachDisk(vmdkDataStorePath, false);
                vmMo.destroy();
            }
        } catch (Throwable e) {
            s_logger.error(String.format("Failed to destroy worker VM [name: %s] due to: [%s].", vmName, e.getMessage()), e);
        }
    }
}
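The command's byte sizes are converted to KiB before comparison and before being written via VirtualDisk.setCapacityInKB. A minimal arithmetic sketch of that conversion, assuming ResourceType.bytesToKiB equals 1024 (consistent with the newSize * 1024 conversion back to bytes above); the literal sizes are illustrative.

// Illustrative sizes only.
long currentSizeBytes = 20L * 1024 * 1024 * 1024;  // existing 20 GiB volume
long newSizeBytes = 30L * 1024 * 1024 * 1024;      // requested 30 GiB
long oldSizeKiB = currentSizeBytes / 1024;          // 20971520 KiB
long newSizeKiB = newSizeBytes / 1024;              // 31457280 KiB
// newSizeKiB < oldSizeKiB would be rejected (VMware cannot shrink a disk);
// otherwise newSizeKiB is the value passed to VirtualDisk.setCapacityInKB,
// and newSizeKiB * 1024 is the byte size reported back in the answer.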