Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class VolumeOrchestrator, method getTasks.
private List<VolumeTask> getTasks(final List<VolumeVO> vols, final Map<Volume, StoragePool> destVols, final VirtualMachineProfile vm) throws StorageUnavailableException {
    final boolean recreate = RecreatableSystemVmEnabled.value();
    final List<VolumeTask> tasks = new ArrayList<>();
    for (final VolumeVO vol : vols) {
        StoragePoolVO assignedPool = null;
        if (destVols != null) {
            final StoragePool pool = destVols.get(vol);
            if (pool != null) {
                assignedPool = _storagePoolDao.findById(pool.getId());
            }
        }
        if (assignedPool == null && recreate) {
            assignedPool = _storagePoolDao.findById(vol.getPoolId());
        }
        if (assignedPool != null) {
            final Volume.State state = vol.getState();
            if (state == Volume.State.Allocated || state == Volume.State.Creating) {
                final VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
                tasks.add(task);
            } else {
                if (vol.isRecreatable()) {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner");
                    }
                    final VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
                    tasks.add(task);
                } else {
                    if (assignedPool.getId() != vol.getPoolId()) {
                        if (s_logger.isDebugEnabled()) {
                            s_logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol);
                        }
                        final DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
                        if (diskOffering.getUseLocalStorage()) {
                            // Currently migration of local volumes is not supported, so bail out
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
                            }
                            throw new CloudRuntimeException("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
                        } else {
                            // Check if storage migration is enabled in config
                            final Boolean isHAOperation = (Boolean) vm.getParameter(VirtualMachineProfile.Param.HaOperation);
                            Boolean storageMigrationEnabled = true;
                            if (isHAOperation != null && isHAOperation) {
                                storageMigrationEnabled = StorageHAMigrationEnabled.value();
                            } else {
                                storageMigrationEnabled = StorageMigrationEnabled.value();
                            }
                            // Always allow ISO volumes to be "migrated"
                            if (storageMigrationEnabled || vol.getIsoId() != null) {
                                if (s_logger.isDebugEnabled()) {
                                    s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
                                }
                                final VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool);
                                tasks.add(task);
                            } else {
                                throw new CloudRuntimeException("Cannot start VM on the hypervisor it was last running on, due to not enough capacity. Please try to start on" + " " + "another hypervisor in the same cluster, or migrate the volumes to another storage pool. Automatic Volume Migration is disabled, " + "so this is not handled automatically.");
                            }
                        }
                    } else {
                        final StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
                        final VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool);
                        tasks.add(task);
                    }
                }
            }
        } else {
            if (vol.getPoolId() == null) {
                throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, Volume.class, vol.getId());
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
            }
            final StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
            final VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool);
            tasks.add(task);
        }
    }
    return tasks;
}
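For context, getTasks returns VolumeTask objects (typed RECREATE, MIGRATE or NOP) that the orchestrator executes afterwards. A minimal sketch of what the holder presumably looks like, inferred from the constructor calls above; the field modifiers and exact layout are assumptions, not a copy of the project source:

    // Sketch only: constructor order (type, volume, pool) matches the calls in getTasks above.
    private static class VolumeTask {
        final VolumeTaskType type;   // RECREATE, MIGRATE or NOP
        final VolumeVO volume;       // the volume the task operates on
        final StoragePoolVO pool;    // target pool for MIGRATE/NOP, null for RECREATE

        VolumeTask(final VolumeTaskType type, final VolumeVO volume, final StoragePoolVO pool) {
            this.type = type;
            this.volume = volume;
            this.pool = pool;
        }
    }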
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class VirtualMachineManagerImpl, method getPoolListForVolumesForMigration.
private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
    final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
    final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
    for (final VolumeVO volume : allVolumes) {
        final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
        final StoragePoolVO pool = _storagePoolDao.findById(poolId);
        final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        if (pool != null) {
            // Check that the pool is accessible from the host and that the disk offering with which the volume was
            // created is compliant with the pool type.
            if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null || pool.isLocal() != diskOffering.getUseLocalStorage()) {
                // Cannot find a pool for the volume. Throw an exception.
                throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool + " while migrating vm to host " + host + ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " + "the given pool.");
            } else if (pool.getId() == currentPool.getId()) {
                // If the pool to migrate to is the same as the current pool, the volume doesn't need to be migrated.
            } else {
                volumeToPoolObjectMap.put(volume, pool);
            }
        } else {
            // Find a suitable pool for the volume. Call the storage pool allocators to find the list of pools.
            final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
            final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
            final ExcludeList avoid = new ExcludeList();
            boolean currentPoolAvailable = false;
            final List<StoragePool> poolList = new ArrayList<>();
            for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
                final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
                if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
                    poolList.addAll(poolListFromAllocator);
                }
            }
            if (poolList != null && !poolList.isEmpty()) {
                // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
                // volume to a pool only if it is required; that is, the current pool on which the volume resides
                // is not available on the destination host.
                final Iterator<StoragePool> iter = poolList.iterator();
                while (iter.hasNext()) {
                    if (currentPool.getId() == iter.next().getId()) {
                        currentPoolAvailable = true;
                        break;
                    }
                }
                if (!currentPoolAvailable) {
                    volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
                }
            }
            if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
                // Cannot find a pool for the volume. Throw an exception.
                throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " + profile.getVirtualMachine() + " to host " + host);
            }
        }
    }
    return volumeToPoolObjectMap;
}
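The volumeToPool argument maps a volume id to the id of the storage pool the caller wants it on; volumes left out of the map fall through to the allocator branch above. A minimal, hypothetical sketch of how a caller inside the same class might assemble it; the variable names and the origin of the ids are illustrative, not taken from the project:

    // Hypothetical caller-side sketch: map each volume id to the requested destination pool id.
    // Volumes without an explicit destination are resolved by the storage pool allocators above.
    final Map<Long, Long> volumeToPool = new HashMap<>();
    volumeToPool.put(rootVolumeId, targetPoolId);   // ids assumed to come from the migrate request
    final Map<Volume, StoragePool> resolved = getPoolListForVolumesForMigration(profile, destinationHost, volumeToPool);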
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class StorageSystemDataMotionStrategy, method getVolumeDetails.
private Map<String, String> getVolumeDetails(final VolumeInfo volumeInfo) {
    final Map<String, String> sourceDetails = new HashMap<>();
    final VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
    final long storagePoolId = volumeVO.getPoolId();
    final StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
    sourceDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress());
    sourceDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
    sourceDetails.put(DiskTO.IQN, volumeVO.get_iScsiName());
    final ChapInfo chapInfo = _volumeService.getChapInfo(volumeInfo, volumeInfo.getDataStore());
    if (chapInfo != null) {
        sourceDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
        sourceDetails.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret());
        sourceDetails.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername());
        sourceDetails.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
    }
    return sourceDetails;
}
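A minimal sketch of how a caller in the same class might read the returned map back. The keys are the DiskTO constants used above; the CHAP entries are only present when getChapInfo returned something, so they need a null guard. The variable names are illustrative:

    final Map<String, String> details = getVolumeDetails(volumeInfo);
    final String storageHost = details.get(DiskTO.STORAGE_HOST);
    final String storagePort = details.get(DiskTO.STORAGE_PORT);
    final String iqn = details.get(DiskTO.IQN);
    // CHAP credentials are optional; check for their presence before using them.
    final String chapInitiatorUser = details.get(DiskTO.CHAP_INITIATOR_USERNAME);
    if (chapInitiatorUser != null) {
        // connect with CHAP authentication (the actual connect command is out of scope here)
    }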
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class StorageSystemDataMotionStrategy, method getHost.
public HostVO getHost(final long dataStoreId) {
    final StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStoreId);
    final List<? extends Cluster> clusters = _mgr.searchForClusters(storagePoolVO.getDataCenterId(), new Long(0), Long.MAX_VALUE, HypervisorType.XenServer.toString());
    if (clusters == null) {
        throw new CloudRuntimeException("Unable to locate an applicable cluster");
    }
    for (final Cluster cluster : clusters) {
        if (cluster.getAllocationState() == AllocationState.Enabled) {
            final List<HostVO> hosts = _hostDao.findByClusterId(cluster.getId());
            if (hosts != null) {
                for (final HostVO host : hosts) {
                    if (host.getResourceState() == ResourceState.Enabled) {
                        return host;
                    }
                }
            }
        }
    }
    throw new CloudRuntimeException("Unable to locate an applicable cluster");
}
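getHost returns the first enabled host it finds in any enabled XenServer cluster of the pool's zone. A minimal sketch of how it could be combined with getVolumeDetails above when preparing a storage-side operation; the call site itself is illustrative and not taken from the project:

    // Illustrative call site: pick an enabled XenServer host in the zone of the volume's data store,
    // then gather the connection details that host would need to reach the volume.
    final HostVO hostToUse = getHost(volumeInfo.getDataStore().getId());
    final Map<String, String> volumeDetails = getVolumeDetails(volumeInfo);
    // hostToUse and volumeDetails would then be handed to the actual copy/migration command (omitted here).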
Use of com.cloud.storage.datastore.db.StoragePoolVO in project cosmic by MissionCriticalCloud.
Class ZoneWideStoragePoolAllocator, method select.
@Override
protected List<StoragePool> select(final DiskProfile dskCh, final VirtualMachineProfile vmProfile, final DeploymentPlan plan, final ExcludeList avoid, final int returnUpTo) {
    s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
    if (dskCh.useLocalStorage()) {
        return null;
    }
    if (s_logger.isTraceEnabled()) {
        // Log the details of pools that are ignored because they are in disabled state
        final List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE);
        if (disabledPools != null && !disabledPools.isEmpty()) {
            for (final StoragePoolVO pool : disabledPools) {
                s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
            }
        }
    }
    final List<StoragePool> suitablePools = new ArrayList<>();
    List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
    if (storagePools == null) {
        storagePools = new ArrayList<>();
    }
    final List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<>();
    for (final StoragePoolVO storagePool : storagePools) {
        if (HypervisorType.Any.equals(storagePool.getHypervisor())) {
            anyHypervisorStoragePools.add(storagePool);
        }
    }
    final List<StoragePoolVO> storagePoolsByHypervisor = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType());
    storagePools.retainAll(storagePoolsByHypervisor);
    storagePools.addAll(anyHypervisorStoragePools);
    // Add the remaining pools in the zone that did not match the tags to the avoid set
    final List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
    allPools.removeAll(storagePools);
    for (final StoragePoolVO pool : allPools) {
        avoid.addPool(pool.getId());
    }
    detectSuitableOrToAvoidPools(dskCh, plan, avoid, returnUpTo, suitablePools, storagePools);
    return suitablePools;
}