Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class UserVmManagerImpl, method handleManagedStorage.
private void handleManagedStorage(final UserVmVO vm, final VolumeVO root) {
    if (Volume.State.Allocated.equals(root.getState())) {
        return;
    }
    final StoragePoolVO storagePool = _storagePoolDao.findById(root.getPoolId());
    if (storagePool != null && storagePool.isManaged()) {
        final Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
        if (hostId != null) {
            final VolumeInfo volumeInfo = volFactory.getVolume(root.getId());
            final Host host = _hostDao.findById(hostId);
            final Command cmd;
            if (host.getHypervisorType() == HypervisorType.XenServer) {
                final DiskTO disk = new DiskTO(volumeInfo.getTO(), root.getDeviceId(), root.getPath(), root.getVolumeType());
                // it's OK in this case to send a detach command to the host for a root volume as this
                // will simply lead to the SR that supports the root volume being removed
                cmd = new DettachCommand(disk, vm.getInstanceName());
                final DettachCommand detachCommand = (DettachCommand) cmd;
                detachCommand.setManaged(true);
                detachCommand.setStorageHost(storagePool.getHostAddress());
                detachCommand.setStoragePort(storagePool.getPort());
                detachCommand.set_iScsiName(root.get_iScsiName());
            } else {
                throw new CloudRuntimeException("This hypervisor type is not supported on managed storage for this command.");
            }
            final Commands cmds = new Commands(Command.OnError.Stop);
            cmds.addCommand(cmd);
            try {
                _agentMgr.send(hostId, cmds);
            } catch (final Exception ex) {
                throw new CloudRuntimeException(ex.getMessage());
            }
            if (!cmds.isSuccessful()) {
                for (final Answer answer : cmds.getAnswers()) {
                    if (!answer.getResult()) {
                        s_logger.warn("Failed to reset vm due to: " + answer.getDetails());
                        throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails());
                    }
                }
            }
            // root.getPoolId() should be null if the VM we are detaching the disk from has never been started before
            final DataStore dataStore = root.getPoolId() != null ? _dataStoreMgr.getDataStore(root.getPoolId(), DataStoreRole.Primary) : null;
            volumeMgr.revokeAccess(volFactory.getVolume(root.getId()), host, dataStore);
        }
    }
}
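The "Failed to reset vm" / "Unable to reset" messages above suggest this helper runs on the VM reset/restore path. A rough, hedged caller-side sketch (caller flow and surrounding names are assumed, not taken from the project):

// Hypothetical caller sketch -- the surrounding reset flow and DAO call are assumed for illustration.
final List<VolumeVO> roots = _volsDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT);
if (!roots.isEmpty()) {
    // Detach the managed SR (XenServer) backing the old root volume before it is recreated;
    // the method is a no-op for unmanaged pools or when no host is known for the VM.
    handleManagedStorage(vm, roots.get(0));
}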
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class UserVmManagerImpl, method migrateVirtualMachineWithVolume.
@Override
@ActionEvent(eventType = EventTypes.EVENT_VM_MIGRATE, eventDescription = "migrating VM", async = true)
public VirtualMachine migrateVirtualMachineWithVolume(final Long vmId, final Host destinationHost, final Map<String, String> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException {
    // Access check - only a root administrator can migrate a VM.
    final Account caller = CallContext.current().getCallingAccount();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the VM");
        }
        throw new PermissionDeniedException("No permission to migrate VM, only a Root Admin can migrate a VM!");
    }
    final VMInstanceVO vm = _vmInstanceDao.findById(vmId);
    if (vm == null) {
        throw new InvalidParameterValueException("Unable to find the vm by id " + vmId);
    }
    if (vm.getState() != State.Running) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("VM is not Running, unable to migrate the vm " + vm);
        }
        final CloudRuntimeException ex = new CloudRuntimeException("VM is not Running, unable to migrate the vm with the specified id");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    if (serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
        throw new InvalidParameterValueException("Live migration of a GPU-enabled VM is not supported");
    }
    if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.KVM)) {
        throw new InvalidParameterValueException("Unsupported hypervisor type for vm migration, we support XenServer/KVM only");
    }
    final long srcHostId = vm.getHostId();
    final Host srcHost = _resourceMgr.getHost(srcHostId);
    if (srcHost == null) {
        throw new InvalidParameterValueException("Cannot migrate VM, there is no Host with id: " + srcHostId);
    }
    // Check that the source and destination hosts are valid and that we are not migrating to the same host.
    if (destinationHost.getId() == srcHostId) {
        throw new InvalidParameterValueException("Cannot migrate VM, VM is already present on this host, please specify a valid destination host to migrate the VM");
    }
    // Check that the source and destination hosts are of the same type and support storage motion.
    if (!srcHost.getHypervisorType().equals(destinationHost.getHypervisorType())) {
        throw new CloudRuntimeException("The source and destination hosts are not of the same type. Source hypervisor type and version: " + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
    }
    if (srcHost.getHypervisorVersion() != null && !srcHost.getHypervisorVersion().equals(destinationHost.getHypervisorVersion())) {
        throw new CloudRuntimeException("The source and destination hosts are not of the same version. Source hypervisor type and version: " + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
    }
    final HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
    if (!capabilities.isStorageMotionSupported()) {
        throw new CloudRuntimeException("Migration with storage isn't supported on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion());
    }
    // Check that the destination host is up.
    if (destinationHost.getState() != com.cloud.host.Status.Up || destinationHost.getResourceState() != ResourceState.Enabled) {
        throw new CloudRuntimeException("Cannot migrate VM, destination host is not in correct state, has status: " + destinationHost.getState() + ", state: " + destinationHost.getResourceState());
    }
    // Check that the VM does not have VM snapshots.
    if (_vmSnapshotDao.findByVm(vmId).size() > 0) {
        throw new InvalidParameterValueException("VM with VM Snapshots cannot be migrated with storage, please remove all VM snapshots");
    }
    final List<VolumeVO> vmVolumes = _volsDao.findUsableVolumesForInstance(vm.getId());
    final Map<Long, Long> volToPoolObjectMap = new HashMap<>();
    if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId().equals(srcHost.getClusterId())) {
        if (volumeToPool.isEmpty()) {
            // The destination host is in the same cluster and no volume-to-pool mappings were given, so this call
            // would not migrate any volumes; fail it - the migrateVirtualMachine api should have been used instead.
            throw new InvalidParameterValueException("Migration of the vm " + vm + " from host " + srcHost + " to destination host " + destinationHost + " doesn't involve migrating the volumes.");
        }
    }
    if (!volumeToPool.isEmpty()) {
        // Check that all the volumes and pools passed as parameters are valid.
        for (final Map.Entry<String, String> entry : volumeToPool.entrySet()) {
            final VolumeVO volume = _volsDao.findByUuid(entry.getKey());
            final StoragePoolVO pool = _storagePoolDao.findByUuid(entry.getValue());
            if (volume == null) {
                throw new InvalidParameterValueException("There is no volume present with the given id " + entry.getKey());
            } else if (pool == null) {
                throw new InvalidParameterValueException("There is no storage pool present with the given id " + entry.getValue());
            } else if (pool.isInMaintenance()) {
                throw new InvalidParameterValueException("Cannot migrate volume " + volume + " to the destination storage pool " + pool.getName() + " as the storage pool is in maintenance mode.");
            } else {
                // Verify that the given volume belongs to the vm.
                if (!vmVolumes.contains(volume)) {
                    throw new InvalidParameterValueException("The volume " + volume + " doesn't belong to the virtual machine " + vm + " that has to be migrated");
                }
                volToPoolObjectMap.put(volume.getId(), pool.getId());
            }
        }
    }
    // Check that all the volumes are in the correct state.
    for (final VolumeVO volume : vmVolumes) {
        if (volume.getState() != Volume.State.Ready) {
            throw new CloudRuntimeException("Volume " + volume + " of the VM is not in Ready state. Cannot migrate the vm with its volumes.");
        }
    }
    // Check the max guest vm limit for the destinationHost.
    final HostVO destinationHostVO = _hostDao.findById(destinationHost.getId());
    if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) {
        throw new VirtualMachineMigrationException("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() + " already has max running vms (count includes system VMs). Cannot migrate to this host");
    }
    checkHostsDedication(vm, srcHostId, destinationHost.getId());
    _itMgr.migrateWithStorage(vm.getUuid(), srcHostId, destinationHost.getId(), volToPoolObjectMap);
    return _vmDao.findById(vm.getId());
}
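A hedged caller-side sketch of the volumeToPool parameter: as the validation loop above shows, keys are volume UUIDs and values are UUIDs of the target primary storage pools (both are resolved via findByUuid), which the method then converts into a database-id map for migrateWithStorage. The variable names below are assumed for illustration:

// Hypothetical invocation sketch -- identifiers are assumed, not taken from the project.
final Map<String, String> volumeToPool = new HashMap<>();
volumeToPool.put(rootVolumeUuid, destPoolUuid);   // volume UUID -> destination pool UUID
volumeToPool.put(dataVolumeUuid, destPoolUuid);   // every mapped volume must belong to the VM being migrated
final VirtualMachine migrated =
        userVmManager.migrateVirtualMachineWithVolume(vmId, destinationHost, volumeToPool);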
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class StoragePoolAutomationImpl, method cancelMaintain.
@Override
public boolean cancelMaintain(final DataStore store) {
    // Change the storage state back to up
    final Long userId = CallContext.current().getCallingUserId();
    final User user = _userDao.findById(userId);
    final Account account = CallContext.current().getCallingAccount();
    final StoragePoolVO poolVO = primaryDataStoreDao.findById(store.getId());
    final StoragePool pool = (StoragePool) store;
    // Handling both zone-wide and cluster-wide primary storage
    List<HostVO> hosts = new ArrayList<>();
    // if the storage scope is zone-wide, collect all up-and-enabled hosts in the zone (per hypervisor type) to send the ModifyStoragePoolCommand to
    if (poolVO.getScope().equals(ScopeType.ZONE)) {
        if (HypervisorType.Any.equals(pool.getHypervisor())) {
            hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId());
        } else {
            hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(poolVO.getHypervisor(), pool.getDataCenterId());
        }
    } else {
        hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up);
    }
    if (hosts == null || hosts.size() == 0) {
        return true;
    }
    // 1. add heartbeat
    for (final HostVO host : hosts) {
        final ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool);
        final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
        if (answer == null || !answer.getResult()) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails()));
            }
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("ModifyStoragePool add succeeded");
            }
        }
    }
    // 2. Get a list of pending work for this queue
    final List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
    // 3. work through the queue
    for (final StoragePoolWorkVO work : pendingWork) {
        try {
            final VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
            if (vmInstance == null) {
                continue;
            }
            // if the instance is a console proxy, start it again
            if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) {
                final ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId());
                vmMgr.advanceStart(consoleProxy.getUuid(), null, null);
                // update work queue
                work.setStartedAfterMaintenance(true);
                _storagePoolWorkDao.update(work.getId(), work);
            }
            // if the instance is a secondary storage vm, start it again
            if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) {
                final SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance.getId());
                vmMgr.advanceStart(ssVm.getUuid(), null, null);
                // update work queue
                work.setStartedAfterMaintenance(true);
                _storagePoolWorkDao.update(work.getId(), work);
            }
            // if the instance is a domain router, start it again
            if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) {
                final DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
                vmMgr.advanceStart(domR.getUuid(), null, null);
                // update work queue
                work.setStartedAfterMaintenance(true);
                _storagePoolWorkDao.update(work.getId(), work);
            }
            // if the instance is a user vm, start it again
            if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
                // don't start a vm that doesn't have a root volume
                if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) {
                    _storagePoolWorkDao.remove(work.getId());
                } else {
                    final UserVmVO userVm = userVmDao.findById(vmInstance.getId());
                    vmMgr.advanceStart(userVm.getUuid(), null, null);
                    work.setStartedAfterMaintenance(true);
                    _storagePoolWorkDao.update(work.getId(), work);
                }
            }
        } catch (final Exception e) {
            s_logger.debug("Failed to start vm", e);
            throw new CloudRuntimeException(e.toString());
        }
    }
    return false;
}
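The console-proxy, secondary-storage-VM, router, and user-VM branches above all repeat the same three steps: advanceStart, flag the work item, persist it. A hedged refactoring sketch of that shared step, with the helper name assumed:

// Hypothetical helper -- a condensation of the repeated block above, name and signature assumed.
private void startVmAfterMaintenance(final StoragePoolWorkVO work, final String vmUuid) throws Exception {
    vmMgr.advanceStart(vmUuid, null, null);           // bring the instance back up
    work.setStartedAfterMaintenance(true);            // mark this queue entry as handled
    _storagePoolWorkDao.update(work.getId(), work);   // persist the updated work item
}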
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class VolumeApiServiceImpl, method orchestrateAttachVolumeToVM.
private Volume orchestrateAttachVolumeToVM(final Long vmId, final Long volumeId, final Long deviceId) {
    final VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
    if (volumeToAttach.isAttachedVM()) {
        throw new CloudRuntimeException("This volume is already attached to a VM.");
    }
    UserVmVO vm = _userVmDao.findById(vmId);
    VolumeVO exstingVolumeOfVm = null;
    final List<VolumeVO> rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
    if (rootVolumesOfVm.size() > 1) {
        throw new CloudRuntimeException("The VM " + vm.getHostName() + " has more than one ROOT volume and is in an invalid state.");
    } else {
        if (!rootVolumesOfVm.isEmpty()) {
            exstingVolumeOfVm = rootVolumesOfVm.get(0);
        } else {
            // locate a data volume of the vm
            final List<VolumeVO> diskVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
            for (final VolumeVO diskVolume : diskVolumesOfVm) {
                if (diskVolume.getState() != Volume.State.Allocated) {
                    exstingVolumeOfVm = diskVolume;
                    break;
                }
            }
        }
    }
    final HypervisorType rootDiskHyperType = vm.getHypervisorType();
    final HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
    VolumeInfo newVolumeOnPrimaryStorage = volumeToAttach;
    // don't create the volume on primary storage if it is being attached to a VM whose root volume hasn't been created yet
    StoragePoolVO destPrimaryStorage = null;
    if (exstingVolumeOfVm != null && !exstingVolumeOfVm.getState().equals(Volume.State.Allocated)) {
        destPrimaryStorage = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
    }
    final boolean volumeOnSecondary = volumeToAttach.getState() == Volume.State.Uploaded;
    if (destPrimaryStorage != null && (volumeToAttach.getState() == Volume.State.Allocated || volumeOnSecondary)) {
        try {
            newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, rootDiskHyperType, destPrimaryStorage);
        } catch (final NoTransitionException e) {
            s_logger.debug("Failed to create volume on primary storage", e);
            throw new CloudRuntimeException("Failed to create volume on primary storage", e);
        }
    }
    // reload the volume from the db
    newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId());
    final boolean moveVolumeNeeded = needMoveVolume(exstingVolumeOfVm, newVolumeOnPrimaryStorage);
    if (moveVolumeNeeded) {
        final PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo) newVolumeOnPrimaryStorage.getDataStore();
        if (primaryStore.isLocal()) {
            throw new CloudRuntimeException("Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName() + " as migration of a local data volume is not allowed");
        }
        final StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
        try {
            newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(), volumeToAttachHyperType);
        } catch (final ConcurrentOperationException e) {
            s_logger.debug("move volume failed", e);
            throw new CloudRuntimeException("move volume failed", e);
        } catch (final StorageUnavailableException e) {
            s_logger.debug("move volume failed", e);
            throw new CloudRuntimeException("move volume failed", e);
        }
    }
    VolumeVO newVol = _volsDao.findById(newVolumeOnPrimaryStorage.getId());
    // Re-fetch the vm object in case of volume migration, to check the current state of the VM
    if (moveVolumeNeeded || volumeOnSecondary) {
        vm = _userVmDao.findById(vmId);
        if (vm == null) {
            throw new InvalidParameterValueException("VM not found.");
        }
    }
    newVol = sendAttachVolumeCommand(vm, newVol, deviceId);
    return newVol;
}
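The decision to copy the volume onto primary storage is spread over several checks above; condensed into a hedged boolean sketch (reusing the local names from the snippet, and assuming the pool lookup for the existing volume succeeds):

// Condensed restatement of the placement condition above -- illustrative only, not project code.
final boolean vmAlreadyOnPrimary = exstingVolumeOfVm != null
        && !Volume.State.Allocated.equals(exstingVolumeOfVm.getState());   // VM already has a volume on a pool
final boolean attachVolumeNotOnPrimary = volumeToAttach.getState() == Volume.State.Allocated
        || volumeToAttach.getState() == Volume.State.Uploaded;             // volume being attached is not on a pool yet
final boolean createOnPrimaryFirst = vmAlreadyOnPrimary && attachVolumeNotOnPrimary;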
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class VolumeApiServiceImpl, method sendAttachVolumeCommand.
private VolumeVO sendAttachVolumeCommand(final UserVmVO vm, VolumeVO volumeToAttach, Long deviceId) {
    String errorMsg = "Failed to attach volume " + volumeToAttach.getName() + " to VM " + vm.getHostName();
    boolean sendCommand = vm.getState() == State.Running;
    AttachAnswer answer = null;
    final Long hostId = vm.getHostId();
    HostVO host = null;
    final StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
    if (hostId != null) {
        host = _hostDao.findById(hostId);
        if (host != null && host.getHypervisorType() == HypervisorType.XenServer && volumeToAttachStoragePool != null && volumeToAttachStoragePool.isManaged()) {
            sendCommand = true;
        }
    }
    // volumeToAttachStoragePool should be null if the VM we are attaching the disk to has never been started before
    final DataStore dataStore = volumeToAttachStoragePool != null ? dataStoreMgr.getDataStore(volumeToAttachStoragePool.getId(), DataStoreRole.Primary) : null;
    // if we don't have a host, the VM we are attaching the disk to has never been started before
    if (host != null) {
        try {
            volService.grantAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
        } catch (final Exception e) {
            volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
            throw new CloudRuntimeException(e.getMessage());
        }
    }
    if (sendCommand) {
        if (host != null && host.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
            volumeToAttach.setPath(volumeToAttach.get_iScsiName());
            _volsDao.update(volumeToAttach.getId(), volumeToAttach);
        }
        final DataTO volTO = volFactory.getVolume(volumeToAttach.getId()).getTO();
        deviceId = getDeviceId(vm, deviceId);
        final DiskTO disk = new DiskTO(volTO, deviceId, volumeToAttach.getPath(), volumeToAttach.getVolumeType());
        final AttachCommand cmd = new AttachCommand(disk, vm.getInstanceName());
        final ChapInfo chapInfo = volService.getChapInfo(volFactory.getVolume(volumeToAttach.getId()), dataStore);
        final Map<String, String> details = new HashMap<>();
        disk.setDetails(details);
        details.put(DiskTO.MANAGED, String.valueOf(volumeToAttachStoragePool.isManaged()));
        details.put(DiskTO.STORAGE_HOST, volumeToAttachStoragePool.getHostAddress());
        details.put(DiskTO.STORAGE_PORT, String.valueOf(volumeToAttachStoragePool.getPort()));
        details.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeToAttach.getSize()));
        details.put(DiskTO.IQN, volumeToAttach.get_iScsiName());
        details.put(DiskTO.MOUNT_POINT, volumeToAttach.get_iScsiName());
        details.put(DiskTO.PROTOCOL_TYPE, volumeToAttach.getPoolType() != null ? volumeToAttach.getPoolType().toString() : null);
        if (chapInfo != null) {
            details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
            details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret());
            details.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername());
            details.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
        }
        _userVmDao.loadDetails(vm);
        final Map<String, String> controllerInfo = new HashMap<>();
        controllerInfo.put(VmDetailConstants.ROOT_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER));
        controllerInfo.put(VmDetailConstants.DATA_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER));
        cmd.setControllerInfo(controllerInfo);
        try {
            answer = (AttachAnswer) _agentMgr.send(hostId, cmd);
        } catch (final Exception e) {
            if (host != null) {
                volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
            }
            throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage());
        }
    }
    if (!sendCommand || answer != null && answer.getResult()) {
        // Mark the volume as attached
        if (sendCommand) {
            final DiskTO disk = answer.getDisk();
            _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), disk.getDiskSeq());
            volumeToAttach = _volsDao.findById(volumeToAttach.getId());
            if (volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
                volumeToAttach.setPath(answer.getDisk().getPath());
                _volsDao.update(volumeToAttach.getId(), volumeToAttach);
            }
        } else {
            deviceId = getDeviceId(vm, deviceId);
            _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), deviceId);
        }
        // insert record for disk I/O statistics
        VmDiskStatisticsVO diskstats = _vmDiskStatsDao.findBy(vm.getAccountId(), vm.getDataCenterId(), vm.getId(), volumeToAttach.getId());
        if (diskstats == null) {
            diskstats = new VmDiskStatisticsVO(vm.getAccountId(), vm.getDataCenterId(), vm.getId(), volumeToAttach.getId());
            _vmDiskStatsDao.persist(diskstats);
        }
        return _volsDao.findById(volumeToAttach.getId());
    } else {
        if (answer != null) {
            final String details = answer.getDetails();
            if (details != null && !details.isEmpty()) {
                errorMsg += "; " + details;
            }
        }
        if (host != null) {
            volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
        }
        throw new CloudRuntimeException(errorMsg);
    }
}
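For managed (e.g. iSCSI) pools, the access-bracketing pattern above is the key detail: the host is granted access to the volume before the AttachCommand is sent, and access is revoked on every failure path. A minimal hedged restatement of that pattern, reusing the local names from the snippet for illustration only:

// Illustrative condensation of the grant/send/revoke pattern used above -- not project code.
volService.grantAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
try {
    answer = (AttachAnswer) _agentMgr.send(hostId, cmd);
} catch (final Exception e) {
    // undo the grant so the managed LUN is not left exposed to the host after a failed attach
    volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
    throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage());
}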