Use of com.cloud.storage.VolumeVO in project cloudstack by apache.
The class VirtualMachineManagerImpl, method getPoolListForVolumesForMigration.
private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
    final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
    final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
    for (final VolumeVO volume : allVolumes) {
        final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
        final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
        final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        if (destPool != null) {
            // Check that the destination pool is accessible from the host and that the disk offering
            // with which the volume was created is compliant with the pool type (local vs. shared).
            if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
                // Cannot find a pool for the volume. Throw an exception.
                throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host + ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " + "the given pool.");
            } else if (destPool.getId() == currentPool.getId()) {
                // If the pool to migrate to is the same as the current pool, the volume doesn't need to be migrated.
            } else {
                volumeToPoolObjectMap.put(volume, destPool);
            }
        } else {
            if (currentPool.isManaged()) {
                volumeToPoolObjectMap.put(volume, currentPool);
            } else {
                // Find a suitable pool for the volume. Call the storage pool allocators to get the list of candidate pools.
                final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
                final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
                final List<StoragePool> poolList = new ArrayList<>();
                final ExcludeList avoid = new ExcludeList();
                for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
                    final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
                    if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
                        poolList.addAll(poolListFromAllocator);
                    }
                }
                boolean currentPoolAvailable = false;
                if (poolList != null && !poolList.isEmpty()) {
                    // The volume needs to be migrated only if the current pool on which it resides is not
                    // available on the destination host; in that case, pick the first pool from the list.
                    final Iterator<StoragePool> iter = poolList.iterator();
                    while (iter.hasNext()) {
                        if (currentPool.getId() == iter.next().getId()) {
                            currentPoolAvailable = true;
                            break;
                        }
                    }
                    if (!currentPoolAvailable) {
                        volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
                    }
                }
                if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
                    // Cannot find a pool for the volume. Throw an exception.
                    throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " + profile.getVirtualMachine() + " to host " + host);
                }
            }
        }
    }
    return volumeToPoolObjectMap;
}
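For context, the volumeToPool argument is simply a map of volume IDs to the storage pool IDs the caller wants them migrated to. A minimal sketch of how such a map might be built, with made-up IDs (the values and variable names below are illustrative, not part of the CloudStack API):

// Hypothetical illustration: map each volume ID to the ID of the pool it should be migrated to.
// In CloudStack these IDs would come from actual VolumeVO and StoragePoolVO rows.
final Map<Long, Long> volumeToPool = new HashMap<>();
volumeToPool.put(101L, 7L); // ask for volume 101 to land on pool 7
volumeToPool.put(102L, 9L); // ask for volume 102 to land on pool 9
// Volumes not present in the map hit the destPool == null branch above,
// where a pool is chosen via the storage pool allocators instead.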
Use of com.cloud.storage.VolumeVO in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateMigrateAway.
private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, final DeploymentPlanner planner) throws InsufficientServerCapacityException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    if (vm == null) {
        s_logger.debug("Unable to find a VM for " + vmUuid);
        throw new CloudRuntimeException("Unable to find " + vmUuid);
    }
    ServiceOfferingVO offeringVO = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, offeringVO, null, null);
    final Long hostId = vm.getHostId();
    if (hostId == null) {
        s_logger.debug("Unable to migrate because the VM doesn't have a host id: " + vm);
        throw new CloudRuntimeException("Unable to migrate " + vmUuid);
    }
    final Host host = _hostDao.findById(hostId);
    Long poolId = null;
    final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
    for (final VolumeVO rootVolumeOfVm : vols) {
        final StoragePoolVO rootDiskPool = _storagePoolDao.findById(rootVolumeOfVm.getPoolId());
        if (rootDiskPool != null) {
            poolId = rootDiskPool.getId();
        }
    }
    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, poolId, null);
    final ExcludeList excludes = new ExcludeList();
    excludes.addHost(hostId);
    DeployDestination dest = null;
    while (true) {
        try {
            dest = _dpMgr.planDeployment(profile, plan, excludes, planner);
        } catch (final AffinityConflictException e2) {
            s_logger.warn("Unable to create deployment, affinity rules associated to the VM conflict", e2);
            throw new CloudRuntimeException("Unable to create deployment, affinity rules associated to the VM conflict");
        }
        if (dest != null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Found destination " + dest + " for migrating to.");
            }
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Unable to find destination for migrating the vm " + profile);
            }
            throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
        }
        excludes.addHost(dest.getHost().getId());
        try {
            migrate(vm, srcHostId, dest);
            return;
        } catch (final ResourceUnavailableException e) {
            s_logger.debug("Unable to migrate to unavailable " + dest);
        } catch (final ConcurrentOperationException e) {
            s_logger.debug("Unable to migrate VM due to: " + e.getMessage());
        }
        try {
            advanceStop(vmUuid, true);
            throw new CloudRuntimeException("Unable to migrate " + vm);
        } catch (final ResourceUnavailableException e) {
            s_logger.debug("Unable to stop VM due to " + e.getMessage());
            throw new CloudRuntimeException("Unable to migrate " + vm);
        } catch (final ConcurrentOperationException e) {
            s_logger.debug("Unable to stop VM due to " + e.getMessage());
            throw new CloudRuntimeException("Unable to migrate " + vm);
        } catch (final OperationTimedoutException e) {
            s_logger.debug("Unable to stop VM due to " + e.getMessage());
            throw new CloudRuntimeException("Unable to migrate " + vm);
        }
    }
}
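The loop above keeps asking the deployment planner for a fresh destination while excluding every host that has already been tried, and only gives up when no destination remains or the VM cannot be stopped. A simplified, standalone sketch of the same exclude-and-retry pattern, using hypothetical helpers pickHost and tryMigrate (these are stand-ins, not CloudStack APIs):

// Hypothetical sketch of the exclude-and-retry pattern; pickHost/tryMigrate stand in
// for _dpMgr.planDeployment(...) and migrate(...), respectively.
final Set<Long> excludedHosts = new HashSet<>();
excludedHosts.add(srcHostId); // never consider the source host
while (true) {
    final Long candidate = pickHost(excludedHosts);
    if (candidate == null) {
        throw new IllegalStateException("No host available to migrate to");
    }
    excludedHosts.add(candidate); // a host that fails is not retried
    if (tryMigrate(candidate)) {
        break; // migration succeeded
    }
}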
Use of com.cloud.storage.VolumeVO in project cloudstack by apache.
The class VirtualMachineManagerImpl, method syncDiskChainChange.
private void syncDiskChainChange(final StartAnswer answer) {
    final VirtualMachineTO vmSpec = answer.getVirtualMachine();
    for (final DiskTO disk : vmSpec.getDisks()) {
        if (disk.getType() != Volume.Type.ISO) {
            final VolumeObjectTO vol = (VolumeObjectTO) disk.getData();
            final VolumeVO volume = _volsDao.findById(vol.getId());
            // If the answer carries a path, use it; otherwise fall back to the path already stored in the
            // DB, since the hypervisor may have returned null instead of an actual path (because it was
            // out of date with the DB).
            if (vol.getPath() != null) {
                volumeMgr.updateVolumeDiskChain(vol.getId(), vol.getPath(), vol.getChainInfo());
            } else {
                volumeMgr.updateVolumeDiskChain(vol.getId(), volume.getPath(), vol.getChainInfo());
            }
        }
    }
}
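The if/else above is a simple fallback: prefer the path reported in the answer, otherwise keep the one already recorded in the DB. The same logic could be written as a single expression; an equivalent sketch (not the actual upstream code):

// Prefer the path from the answer; fall back to the copy recorded in the DB.
final String path = vol.getPath() != null ? vol.getPath() : volume.getPath();
volumeMgr.updateVolumeDiskChain(vol.getId(), path, vol.getChainInfo());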
Use of com.cloud.storage.VolumeVO in project cloudstack by apache.
The class VirtualMachineManagerImpl, method handlePath.
// For managed storage on XenServer and VMware, we need to update the DB with a path if the VDI/VMDK file was newly created.
private void handlePath(final DiskTO[] disks, final Map<String, String> iqnToPath) {
    if (disks != null && iqnToPath != null) {
        for (final DiskTO disk : disks) {
            final Map<String, String> details = disk.getDetails();
            final boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
            if (isManaged) {
                final Long volumeId = disk.getData().getId();
                final VolumeVO volume = _volsDao.findById(volumeId);
                final String iScsiName = volume.get_iScsiName();
                final String path = iqnToPath.get(iScsiName);
                if (path != null) {
                    volume.setPath(path);
                    _volsDao.update(volumeId, volume);
                }
            }
        }
    }
}
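The iqnToPath argument maps each managed volume's iSCSI qualified name (IQN) to the path the hypervisor reported for the newly created VDI/VMDK. A sketch of what such a map might contain, with made-up values (real entries come from the hypervisor's answer):

// Hypothetical illustration only; the IQN and path values are invented.
final Map<String, String> iqnToPath = new HashMap<>();
iqnToPath.put("iqn.2010-01.com.example:vol-101", "[datastore1] vol-101/vol-101.vmdk");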
Use of com.cloud.storage.VolumeVO in project cloudstack by apache.
The class VolumeDataFactoryImpl, method getVolume.
@Override
public VolumeInfo getVolume(long volumeId, DataStore store) {
    VolumeVO volumeVO = volumeDao.findById(volumeId);
    VolumeObject vol = VolumeObject.getVolumeObject(store, volumeVO);
    return vol;
}
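A caller that already holds a DataStore reference would typically obtain a store-scoped VolumeInfo through this factory; a minimal usage sketch (volumeDataFactory and primaryStore are assumed to be injected or looked up elsewhere):

// Hypothetical usage; variable names are illustrative.
final VolumeInfo volumeOnStore = volumeDataFactory.getVolume(volumeId, primaryStore);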