use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class StorageManagerImpl method storagePoolHasEnoughSpace.
@Override
public boolean storagePoolHasEnoughSpace(List<Volume> volumes, StoragePool pool, Long clusterId) {
    if (volumes == null || volumes.isEmpty()) {
        return false;
    }
    if (!checkUsagedSpace(pool)) {
        return false;
    }
    // allocated space includes templates
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Destination pool id: " + pool.getId());
    }
    StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
    long allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null);
    long totalAskingSize = 0;
    for (Volume volume : volumes) {
        // refreshing the volume from the DB to get latest hv_ss_reserve (hypervisor snapshot reserve) field
        // I could have just assigned this to "volume", but decided to make a new variable for it so that it
        // might be clearer that this "volume" in "volumes" still might have an old value for hv_ss_reserve.
        VolumeVO volumeVO = _volumeDao.findById(volume.getId());
        if (volumeVO.getHypervisorSnapshotReserve() == null) {
            // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
            volService.updateHypervisorSnapshotReserveForVolume(getDiskOfferingVO(volumeVO), volumeVO.getId(), getHypervisorType(volumeVO));
            // hv_ss_reserve field might have been updated; refresh from DB to make use of it in getDataObjectSizeIncludingHypervisorSnapshotReserve
            volumeVO = _volumeDao.findById(volume.getId());
        }
        // this applies at most once per VM (for a root disk that is to leverage a template)
        if (volume.getTemplateId() != null) {
            VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volume.getTemplateId());
            if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) {
                allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
            }
        }
        // In case the volume is moving across pools or is not ready yet, the asking size has to be computed
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("pool id for the volume with id: " + volumeVO.getId() + " is " + volumeVO.getPoolId());
        }
        if ((volumeVO.getState() != Volume.State.Ready) || (volumeVO.getPoolId() != pool.getId())) {
            totalAskingSize += getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeVO, poolVO);
            if (ScopeType.ZONE.equals(poolVO.getScope()) && volumeVO.getTemplateId() != null) {
                VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId());
                if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) {
                    if (clusterId != null && _clusterDao.getSupportsResigning(clusterId)) {
                        totalAskingSize += getBytesRequiredForTemplate(tmpl, pool);
                    }
                }
            }
        }
    }
    long totalOverProvCapacity;
    if (pool.getPoolType().supportsOverProvisioning()) {
        BigDecimal overProvFactor = getStorageOverProvisioningFactor(pool.getId());
        totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();
        s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString());
        s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes());
    } else {
        totalOverProvCapacity = pool.getCapacityBytes();
        s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString());
    }
    s_logger.debug("Total capacity of the pool " + poolVO.getName() + " id: " + pool.getId() + " is " + totalOverProvCapacity);
    double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + storageAllocatedThreshold);
    }
    double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double) totalOverProvCapacity;
    if (usedPercentage > storageAllocatedThreshold) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold + ", skipping this pool");
        }
        return false;
    }
    if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize);
        }
        return false;
    }
    return true;
}
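To make the capacity arithmetic above easier to follow, here is a self-contained sketch of the same check with hypothetical numbers; the values (1 TB pool, overprovisioning factor 2.0, threshold 0.85) are illustrative, not CloudStack defaults.

import java.math.BigDecimal;

public class PoolCapacityCheckSketch {
    public static void main(String[] args) {
        long capacityBytes = 1_000_000_000_000L;            // hypothetical 1 TB raw pool capacity
        BigDecimal overProvFactor = new BigDecimal("2.0");  // hypothetical overprovisioning factor
        long totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(capacityBytes)).longValue();

        long allocatedSizeWithTemplate = 1_500_000_000_000L; // already allocated, including the template
        long totalAskingSize = 300_000_000_000L;             // size of the volumes being placed
        double storageAllocatedThreshold = 0.85;             // hypothetical disable threshold

        double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double) totalOverProvCapacity;
        boolean hasEnoughSpace = usedPercentage <= storageAllocatedThreshold
                && totalOverProvCapacity >= allocatedSizeWithTemplate + totalAskingSize;
        // prints used=0.9 enough=false: the pool has crossed the threshold, so it is skipped
        System.out.println("used=" + usedPercentage + " enough=" + hasEnoughSpace);
    }
}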
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class VirtualMachineManagerImpl method getPoolListForVolumesForMigration.
private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
    final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
    final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
    for (final VolumeVO volume : allVolumes) {
        final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
        final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
        final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        if (destPool != null) {
            // Check that the pool is accessible from the host and that the disk offering with which the volume was
            // created is compliant with the pool type.
            if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
                // Cannot find a pool for the volume. Throw an exception.
                throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host + ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on the given pool.");
            } else if (destPool.getId() == currentPool.getId()) {
                // If the pool to migrate to is the same as the current pool, the volume doesn't need to be migrated.
            } else {
                volumeToPoolObjectMap.put(volume, destPool);
            }
        } else {
            if (currentPool.isManaged()) {
                volumeToPoolObjectMap.put(volume, currentPool);
            } else {
                // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
                final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
                final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
                final List<StoragePool> poolList = new ArrayList<>();
                final ExcludeList avoid = new ExcludeList();
                for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
                    final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
                    if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
                        poolList.addAll(poolListFromAllocator);
                    }
                }
                boolean currentPoolAvailable = false;
                if (poolList != null && !poolList.isEmpty()) {
                    // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
                    // volume to a pool only if it is required; that is, the current pool on which the volume resides
                    // is not available on the destination host.
                    final Iterator<StoragePool> iter = poolList.iterator();
                    while (iter.hasNext()) {
                        if (currentPool.getId() == iter.next().getId()) {
                            currentPoolAvailable = true;
                            break;
                        }
                    }
                    if (!currentPoolAvailable) {
                        volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
                    }
                }
                if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
                    // Cannot find a pool for the volume. Throw an exception.
                    throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " + profile.getVirtualMachine() + " to host " + host);
                }
            }
        }
    }
    return volumeToPoolObjectMap;
}
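The branching above reduces to a small decision table. The following self-contained method restates it; the name decideTargetPool and its boolean parameters are a hypothetical simplification for illustration, not CloudStack API.

// Hypothetical distillation of the placement decision in getPoolListForVolumesForMigration:
// which pool (if any) a volume gets mapped to before the VM migrates.
static String decideTargetPool(boolean destPoolSpecified, boolean destPoolUsable,
                               boolean destIsCurrent, boolean currentIsManaged,
                               boolean currentReachableFromHost, boolean allocatorFoundPool) {
    if (destPoolSpecified) {
        if (!destPoolUsable) {
            throw new IllegalStateException("requested pool not accessible from host or not offering-compliant");
        }
        return destIsCurrent ? "no mapping (volume stays put)" : "map to requested pool";
    }
    if (currentIsManaged) {
        return "map to current pool"; // managed storage keeps an explicit mapping to its current pool
    }
    if (currentReachableFromHost) {
        return "no mapping (volume stays put)"; // current pool shows up in the allocator results
    }
    if (allocatorFoundPool) {
        return "map to first allocator pool";
    }
    throw new IllegalStateException("no storage pool available for the volume");
}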
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class SolidFireSharedHostListener method handleVMware.
private void handleVMware(long hostId, boolean add) {
    HostVO host = hostDao.findById(hostId);
    if (HypervisorType.VMware.equals(host.getHypervisorType())) {
        List<StoragePoolVO> storagePools = storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME);
        if (storagePools != null && storagePools.size() > 0) {
            List<Map<String, String>> targets = new ArrayList<>();
            for (StoragePoolVO storagePool : storagePools) {
                if (storagePool.getClusterId().equals(host.getClusterId())) {
                    long storagePoolId = storagePool.getId();
                    StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN);
                    String iqn = storagePoolDetail.getValue();
                    storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP);
                    String sVip = storagePoolDetail.getValue();
                    storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT);
                    String sPort = storagePoolDetail.getValue();
                    Map<String, String> details = new HashMap<>();
                    details.put(ModifyTargetsCommand.IQN, iqn);
                    details.put(ModifyTargetsCommand.STORAGE_HOST, sVip);
                    details.put(ModifyTargetsCommand.STORAGE_PORT, sPort);
                    targets.add(details);
                }
            }
            if (targets.size() > 0) {
                ModifyTargetsCommand cmd = new ModifyTargetsCommand();
                cmd.setAdd(add);
                cmd.setTargets(targets);
                sendModifyTargetsCommand(cmd, hostId);
            }
        }
    }
}
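For reference, a single entry in the targets list has the shape sketched below. The map keys and the setAdd/setTargets calls come straight from the listener above; the IQN, VIP, and port values are hypothetical placeholders.

// Sketch of one target entry as built by handleVMware (values are made up):
Map<String, String> target = new HashMap<>();
target.put(ModifyTargetsCommand.IQN, "iqn.2010-01.com.solidfire:mvip.vol-7.42"); // hypothetical volume IQN
target.put(ModifyTargetsCommand.STORAGE_HOST, "192.168.10.20");                  // hypothetical storage VIP
target.put(ModifyTargetsCommand.STORAGE_PORT, "3260");                           // standard iSCSI port

ModifyTargetsCommand cmd = new ModifyTargetsCommand();
cmd.setAdd(true); // true when the host joins the cluster, false on removal
cmd.setTargets(Collections.singletonList(target));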
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class UserVmManagerTest method testRestoreVMF2.
// Test restoreVm when VM is in stopped state
@Test
public void testRestoreVMF2() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException {
    doReturn(VirtualMachine.State.Stopped).when(_vmMock).getState();
    when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
    when(_volsDao.findByInstanceAndType(314L, Volume.Type.ROOT)).thenReturn(_rootVols);
    doReturn(false).when(_rootVols).isEmpty();
    when(_rootVols.get(eq(0))).thenReturn(_volumeMock);
    doReturn(3L).when(_volumeMock).getTemplateId();
    when(_templateDao.findById(anyLong())).thenReturn(_templateMock);
    when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock);
    doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong());
    when(_volumeMock.getId()).thenReturn(3L);
    doNothing().when(_volsDao).detachVolume(anyLong());
    when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735");
    Account account = new AccountVO("testaccount", 1L, "networkdomain", (short) 0, "uuid");
    UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    StoragePoolVO storagePool = new StoragePoolVO();
    storagePool.setManaged(false);
    when(_storagePoolDao.findById(anyLong())).thenReturn(storagePool);
    CallContext.register(user, account);
    try {
        _userVmMgr.restoreVMInternal(_account, _vmMock, null);
    } finally {
        CallContext.unregister();
    }
}
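Note the test stubs an unmanaged StoragePoolVO. A companion test for the managed-storage path could reuse the same mock setup and flip that one flag; the sketch below is hypothetical and not part of UserVmManagerTest.

// Hypothetical variant: exercise restoreVMInternal against a managed pool.
StoragePoolVO managedPool = new StoragePoolVO();
managedPool.setManaged(true); // the only change from testRestoreVMF2's setup
when(_storagePoolDao.findById(anyLong())).thenReturn(managedPool);
// ...then the same CallContext.register / try / finally pattern as above.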
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class LocalStoragePoolAllocator method select.
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
    s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
    if (!dskCh.useLocalStorage()) {
        return null;
    }
    if (s_logger.isTraceEnabled()) {
        // Log details of the pools that are ignored because they are in the disabled state
        List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
        if (disabledPools != null && !disabledPools.isEmpty()) {
            for (StoragePoolVO pool : disabledPools) {
                s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
            }
        }
    }
    List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    // data disk and host identified from deploying vm (attach volume case)
    if (plan.getHostId() != null) {
        List<StoragePoolVO> hostTagsPools = _storagePoolDao.findLocalStoragePoolsByHostAndTags(plan.getHostId(), dskCh.getTags());
        for (StoragePoolVO pool : hostTagsPools) {
            if (pool != null && pool.isLocal()) {
                StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
                if (filter(avoid, storagePool, dskCh, plan)) {
                    s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
                    suitablePools.add(storagePool);
                } else {
                    avoid.addPool(pool.getId());
                }
            }
            if (suitablePools.size() == returnUpTo) {
                break;
            }
        }
    } else {
        if (plan.getPodId() == null) {
            // zone wide primary storage deployment
            return null;
        }
        List<StoragePoolVO> availablePools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
        for (StoragePoolVO pool : availablePools) {
            if (suitablePools.size() == returnUpTo) {
                break;
            }
            StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
            if (filter(avoid, storagePool, dskCh, plan)) {
                suitablePools.add(storagePool);
            } else {
                avoid.addPool(pool.getId());
            }
        }
        // add the remaining pools in the cluster, which did not match the tags, to the avoid set
        List<StoragePoolVO> allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
        allPools.removeAll(availablePools);
        for (StoragePoolVO pool : allPools) {
            avoid.addPool(pool.getId());
        }
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
    }
    return suitablePools;
}
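The tail of the pod-scoped branch uses a subtract-and-avoid pattern: query all local pools, remove the tag-matched ones, and mark the remainder so later allocators skip them. A self-contained sketch with plain Long ids (the names allPoolIds, tagMatchedIds, and avoidedPoolIds are hypothetical stand-ins):

import java.util.*;

public class AvoidSetSketch {
    public static void main(String[] args) {
        List<Long> allPoolIds = new ArrayList<>(Arrays.asList(1L, 2L, 3L, 4L)); // every local pool in the cluster
        List<Long> tagMatchedIds = Arrays.asList(1L, 3L);                       // pools matching the disk offering's tags
        allPoolIds.removeAll(tagMatchedIds);                                    // leaves the non-matching pools: [2, 4]

        Set<Long> avoidedPoolIds = new HashSet<>(allPoolIds); // stands in for ExcludeList.addPool(...)
        System.out.println(avoidedPoolIds);                   // prints [2, 4] (order may vary)
    }
}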