Example 66 with StoragePoolVO

Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.

From the class KvmNonManagedStorageSystemDataMotionTest, method configureTestInternalCanHandle:

private Map<VolumeInfo, DataStore> configureTestInternalCanHandle(boolean isManagedStorage, StoragePoolType storagePoolType) {
    // Spy a real VolumeObject and stub the pool id it reports.
    VolumeObject volumeInfo = Mockito.spy(new VolumeObject());
    Mockito.doReturn(0L).when(volumeInfo).getPoolId();
    // Spy the primary data store and stub its id to match the stubbed pool id.
    DataStore ds = Mockito.spy(new PrimaryDataStoreImpl());
    Mockito.doReturn(0L).when(ds).getId();
    Map<VolumeInfo, DataStore> volumeMap = new HashMap<>();
    volumeMap.put(volumeInfo, ds);
    // Stub the pool returned by the DAO so the code under test sees the requested
    // pool type and managed/non-managed flag.
    StoragePoolVO storagePool = Mockito.spy(new StoragePoolVO());
    Mockito.doReturn(storagePoolType).when(storagePool).getPoolType();
    Mockito.doReturn(storagePool).when(primaryDataStoreDao).findById(0L);
    Mockito.doReturn(isManagedStorage).when(storagePool).isManaged();
    return volumeMap;
}
Also used : HashMap(java.util.HashMap) PrimaryDataStoreImpl(org.apache.cloudstack.storage.datastore.PrimaryDataStoreImpl) DataStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) VMTemplateStoragePoolVO(com.cloud.storage.VMTemplateStoragePoolVO) StoragePoolVO(org.apache.cloudstack.storage.datastore.db.StoragePoolVO) VolumeInfo(org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo) VolumeObject(org.apache.cloudstack.storage.volume.VolumeObject)
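
A test that calls this helper would typically just assert on what it stubbed. The sketch below is a minimal, hypothetical JUnit usage: the test method name and the PowerFlex pool type are assumptions for illustration; only the helper and the primaryDataStoreDao stub it configures are taken from the snippet above.

@Test
public void configureTestInternalCanHandleWiresPoolLookup() {
    // Hypothetical usage sketch; relies only on the helper above and the DAO stub it sets up.
    Map<VolumeInfo, DataStore> volumeMap = configureTestInternalCanHandle(true, StoragePoolType.PowerFlex);
    Assert.assertEquals(1, volumeMap.size());
    StoragePoolVO pool = primaryDataStoreDao.findById(0L);
    Assert.assertTrue(pool.isManaged());
    Assert.assertEquals(StoragePoolType.PowerFlex, pool.getPoolType());
}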

Example 67 with StoragePoolVO

Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.

From the class ScaleIOVMSnapshotStrategy, method takeVMSnapshot:

@Override
public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) {
    UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());
    VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
    try {
        vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
    } catch (NoTransitionException e) {
        throw new CloudRuntimeException(e.getMessage());
    }
    boolean result = false;
    try {
        Map<String, String> srcVolumeDestSnapshotMap = new HashMap<>();
        List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
        final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId());
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        long prev_chain_size = 0;
        long virtual_size = 0;
        for (VolumeObjectTO volume : volumeTOs) {
            String volumeSnapshotName = String.format("%s-%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(), volume.getId(), storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());
            srcVolumeDestSnapshotMap.put(ScaleIOUtil.getVolumePath(volume.getPath()), volumeSnapshotName);
            virtual_size += volume.getSize();
            VolumeVO volumeVO = volumeDao.findById(volume.getId());
            prev_chain_size += volumeVO.getVmSnapshotChainSize() == null ? 0 : volumeVO.getVmSnapshotChainSize();
        }
        VMSnapshotTO current = null;
        VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId());
        if (currentSnapshot != null) {
            current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot);
        }
        if (current == null)
            vmSnapshotVO.setParent(null);
        else
            vmSnapshotVO.setParent(current.getId());
        try {
            final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
            SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap);
            if (snapshotGroup == null) {
                throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool");
            }
            String snapshotGroupId = snapshotGroup.getSnapshotGroupId();
            List<String> volumeIds = snapshotGroup.getVolumeIds();
            if (volumeIds != null && !volumeIds.isEmpty()) {
                List<VMSnapshotDetailsVO> vmSnapshotDetails = new ArrayList<VMSnapshotDetailsVO>();
                vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "SnapshotGroupId", snapshotGroupId, false));
                for (int index = 0; index < volumeIds.size(); index++) {
                    String volumeSnapshotName = srcVolumeDestSnapshotMap.get(ScaleIOUtil.getVolumePath(volumeTOs.get(index).getPath()));
                    String pathWithScaleIOVolumeName = ScaleIOUtil.updatedPathWithVolumeName(volumeIds.get(index), volumeSnapshotName);
                    vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "Vol_" + volumeTOs.get(index).getId() + "_Snapshot", pathWithScaleIOVolumeName, false));
                }
                vmSnapshotDetailsDao.saveDetails(vmSnapshotDetails);
            }
            finalizeCreate(vmSnapshotVO, volumeTOs);
            result = true;
            LOGGER.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName());
            long new_chain_size = 0;
            for (VolumeObjectTO volumeTo : volumeTOs) {
                publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo);
                new_chain_size += volumeTo.getSize();
            }
            publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, new_chain_size - prev_chain_size, virtual_size);
            return vmSnapshot;
        } catch (Exception e) {
            String errMsg = "Unable to take vm snapshot due to: " + e.getMessage();
            LOGGER.warn(errMsg, e);
            throw new CloudRuntimeException(errMsg);
        }
    } finally {
        if (!result) {
            try {
                vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
                String subject = "Take snapshot failed for VM: " + userVm.getDisplayName();
                String message = "Snapshot operation failed for VM: " + userVm.getDisplayName() + ", Please check and delete if any stale volumes created with VM snapshot id: " + vmSnapshot.getVmId();
                alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT, userVm.getDataCenterId(), userVm.getPodIdToDeployIn(), subject, message);
            } catch (NoTransitionException e1) {
                LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage());
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) SnapshotGroup(org.apache.cloudstack.storage.datastore.api.SnapshotGroup) ConfigurationException(javax.naming.ConfigurationException) NoTransitionException(com.cloud.utils.fsm.NoTransitionException) CloudRuntimeException(com.cloud.utils.exception.CloudRuntimeException) VMSnapshotVO(com.cloud.vm.snapshot.VMSnapshotVO) UserVm(com.cloud.uservm.UserVm) VMSnapshotTO(com.cloud.agent.api.VMSnapshotTO) VolumeVO(com.cloud.storage.VolumeVO) CloudRuntimeException(com.cloud.utils.exception.CloudRuntimeException) NoTransitionException(com.cloud.utils.fsm.NoTransitionException) ScaleIOGatewayClient(org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient) StoragePoolVO(org.apache.cloudstack.storage.datastore.db.StoragePoolVO) VolumeObjectTO(org.apache.cloudstack.storage.to.VolumeObjectTO) VMSnapshotDetailsVO(com.cloud.vm.snapshot.VMSnapshotDetailsVO)
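
The per-volume snapshot names built in the loop above follow a fixed five-part format. The standalone sketch below only illustrates that format; the prefix, ids, identifier value and pool UUID are made-up stand-ins for ScaleIOUtil.VMSNAPSHOT_PREFIX, the VM snapshot and volume ids, ManagementServerImpl.customCsIdentifier and StoragePoolVO.getUuid().

// Illustration only (plain Java, no CloudStack classes); all values are hypothetical.
String poolUuid = "6f5bf4f1-2f7c-4f0a-9d3e-123456789abc";
// First UUID segment with its leading four characters dropped, as in the loop above.
String poolSuffix = poolUuid.split("-")[0].substring(4); // "f4f1"
String volumeSnapshotName = String.format("%s-%s-%s-%s-%s",
        "VMSnapshot",   // stand-in for ScaleIOUtil.VMSNAPSHOT_PREFIX
        42L,            // VM snapshot id
        7L,             // volume id
        poolSuffix,
        "abcd1234");    // stand-in for ManagementServerImpl.customCsIdentifier.value()
// volumeSnapshotName == "VMSnapshot-42-7-f4f1-abcd1234"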

Example 68 with StoragePoolVO

Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.

From the class ClusterScopeStoragePoolAllocator, method select:

@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
    s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool");
    if (!bypassStorageTypeCheck && dskCh.useLocalStorage()) {
        return null;
    }
    List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    long dcId = plan.getDataCenterId();
    Long podId = plan.getPodId();
    Long clusterId = plan.getClusterId();
    if (podId == null) {
        // only podId is passed into this call.
        return null;
    }
    if (dskCh.getTags() != null && dskCh.getTags().length != 0) {
        s_logger.debug("Looking for pools in dc: " + dcId + "  pod:" + podId + "  cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()) + ". Disabled pools will be ignored.");
    } else {
        s_logger.debug("Looking for pools in dc: " + dcId + "  pod:" + podId + "  cluster:" + clusterId + ". Disabled pools will be ignored.");
    }
    if (s_logger.isTraceEnabled()) {
        // Log the pools details that are ignored because they are in disabled state
        List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, ScopeType.CLUSTER);
        if (disabledPools != null && !disabledPools.isEmpty()) {
            for (StoragePoolVO pool : disabledPools) {
                s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
            }
        }
    }
    List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
    s_logger.debug("Found pools matching tags: " + pools);
    // add remaining pools in cluster, that did not match tags, to avoid set
    List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
    allPools.removeAll(pools);
    for (StoragePoolVO pool : allPools) {
        s_logger.debug("Adding pool " + pool + " to avoid set since it did not match tags");
        avoid.addPool(pool.getId());
    }
    if (pools.size() == 0) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No storage pools available for " + ServiceOffering.StorageType.shared.toString() + " volume allocation, returning");
        }
        return suitablePools;
    }
    for (StoragePoolVO pool : pools) {
        if (suitablePools.size() == returnUpTo) {
            break;
        }
        StoragePool storagePool = (StoragePool) dataStoreMgr.getPrimaryDataStore(pool.getId());
        if (filter(avoid, storagePool, dskCh, plan)) {
            suitablePools.add(storagePool);
        } else {
            avoid.addPool(pool.getId());
        }
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("ClusterScopeStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
    }
    return suitablePools;
}
Also used : StoragePool(com.cloud.storage.StoragePool) ArrayList(java.util.ArrayList) StoragePoolVO(org.apache.cloudstack.storage.datastore.db.StoragePoolVO)
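
The tag handling above follows a simple pattern: pools matching the disk offering tags stay candidates, and every other pool in the same scope is added to the avoid set up front. A standalone sketch of that partitioning, with plain longs standing in for StoragePoolVO rows (all ids are made up):

// Not CloudStack API; plain java.util collections illustrating the "match tags, avoid the rest" step.
List<Long> poolsMatchingTags = new ArrayList<>(Arrays.asList(1L, 2L));        // e.g. pools tagged "ssd"
List<Long> allPoolsInCluster = new ArrayList<>(Arrays.asList(1L, 2L, 3L, 4L));
allPoolsInCluster.removeAll(poolsMatchingTags);                                // pools 3 and 4 did not match
Set<Long> avoidSet = new HashSet<>(allPoolsInCluster);                         // excluded before any further filtering
List<Long> candidates = poolsMatchingTags;                                     // only these go through filter(...)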

Example 69 with StoragePoolVO

Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.

From the class LocalStoragePoolAllocator, method select:

@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
    s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
    if (!bypassStorageTypeCheck && !dskCh.useLocalStorage()) {
        return null;
    }
    if (s_logger.isTraceEnabled()) {
        // Log the pools details that are ignored because they are in disabled state
        List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
        if (disabledPools != null && !disabledPools.isEmpty()) {
            for (StoragePoolVO pool : disabledPools) {
                s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
            }
        }
    }
    List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    // data disk and host identified from deploying vm (attach volume case)
    if (plan.getHostId() != null) {
        List<StoragePoolVO> hostTagsPools = storagePoolDao.findLocalStoragePoolsByHostAndTags(plan.getHostId(), dskCh.getTags());
        for (StoragePoolVO pool : hostTagsPools) {
            if (pool != null && pool.isLocal()) {
                StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
                if (filter(avoid, storagePool, dskCh, plan)) {
                    s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
                    suitablePools.add(storagePool);
                } else {
                    avoid.addPool(pool.getId());
                }
            }
            if (suitablePools.size() == returnUpTo) {
                break;
            }
        }
    } else {
        if (plan.getPodId() == null) {
            // zone wide primary storage deployment
            return null;
        }
        List<StoragePoolVO> availablePools = storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
        for (StoragePoolVO pool : availablePools) {
            if (suitablePools.size() == returnUpTo) {
                break;
            }
            StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
            if (filter(avoid, storagePool, dskCh, plan)) {
                suitablePools.add(storagePool);
            } else {
                avoid.addPool(pool.getId());
            }
        }
        // add remaining pools in cluster, that did not match tags, to avoid
        // set
        List<StoragePoolVO> allPools = storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
        allPools.removeAll(availablePools);
        for (StoragePoolVO pool : allPools) {
            avoid.addPool(pool.getId());
        }
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
    }
    return suitablePools;
}
Also used : StoragePool(com.cloud.storage.StoragePool) StoragePoolVO(org.apache.cloudstack.storage.datastore.db.StoragePoolVO) ArrayList(java.util.ArrayList)
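
Both branches above cap the result list at returnUpTo and push pools that fail the filter into the avoid set. The loop below shows that cap in isolation; the pool ids and the passesFilter placeholder are made up and stand in for filter(avoid, storagePool, dskCh, plan):

// Standalone illustration of the returnUpTo cap shared by the allocators.
int returnUpTo = 2;
List<Long> candidatePoolIds = Arrays.asList(10L, 11L, 12L, 13L);
List<Long> suitable = new ArrayList<>();
Set<Long> avoidSet = new HashSet<>();
for (Long poolId : candidatePoolIds) {
    if (suitable.size() == returnUpTo) {
        break;                                   // stop as soon as enough pools were found
    }
    boolean passesFilter = (poolId % 2 == 0);    // placeholder for filter(avoid, storagePool, dskCh, plan)
    if (passesFilter) {
        suitable.add(poolId);
    } else {
        avoidSet.add(poolId);                    // mirrors avoid.addPool(pool.getId())
    }
}
// suitable == [10, 12], avoidSet == [11]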

Example 70 with StoragePoolVO

Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.

From the class ZoneWideStoragePoolAllocator, method select:

@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
    LOGGER.debug("ZoneWideStoragePoolAllocator to find storage pool");
    if (!bypassStorageTypeCheck && dskCh.useLocalStorage()) {
        return null;
    }
    if (LOGGER.isTraceEnabled()) {
        // Log the pools details that are ignored because they are in disabled state
        List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE);
        if (disabledPools != null && !disabledPools.isEmpty()) {
            for (StoragePoolVO pool : disabledPools) {
                LOGGER.trace("Ignoring pool " + pool + " as it is in disabled state.");
            }
        }
    }
    List<StoragePool> suitablePools = new ArrayList<>();
    List<StoragePoolVO> storagePools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
    if (storagePools == null) {
        storagePools = new ArrayList<>();
    }
    List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<>();
    for (StoragePoolVO storagePool : storagePools) {
        if (HypervisorType.Any.equals(storagePool.getHypervisor())) {
            anyHypervisorStoragePools.add(storagePool);
        }
    }
    List<StoragePoolVO> storagePoolsByHypervisor = storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType());
    storagePools.retainAll(storagePoolsByHypervisor);
    storagePools.addAll(anyHypervisorStoragePools);
    // add remaining pools in zone, that did not match tags, to avoid set
    List<StoragePoolVO> allPools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
    allPools.removeAll(storagePools);
    for (StoragePoolVO pool : allPools) {
        avoid.addPool(pool.getId());
    }
    for (StoragePoolVO storage : storagePools) {
        if (suitablePools.size() == returnUpTo) {
            break;
        }
        StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(storage.getId());
        if (filter(avoid, storagePool, dskCh, plan)) {
            suitablePools.add(storagePool);
        } else {
            if (canAddStoragePoolToAvoidSet(storage)) {
                avoid.addPool(storagePool.getId());
            }
        }
    }
    return suitablePools;
}
Also used : StoragePool(com.cloud.storage.StoragePool) StoragePoolVO(org.apache.cloudstack.storage.datastore.db.StoragePoolVO) ArrayList(java.util.ArrayList)
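
The hypervisor handling above keeps the zone-wide pools whose hypervisor matches the disk profile and then re-adds pools registered with HypervisorType.Any. A standalone sketch of that retainAll/addAll step, with strings standing in for StoragePoolVO rows (all names are made up):

// Not CloudStack API; plain java.util collections illustrating the hypervisor filtering step.
List<String> taggedPools = new ArrayList<>(Arrays.asList("kvm-pool", "vmware-pool", "any-pool"));
List<String> anyHypervisorPools = new ArrayList<>(Collections.singletonList("any-pool"));
List<String> poolsForRequestedHypervisor = Arrays.asList("kvm-pool");  // e.g. the plan asks for KVM
taggedPools.retainAll(poolsForRequestedHypervisor);                    // drops "vmware-pool" and "any-pool"
taggedPools.addAll(anyHypervisorPools);                                // HypervisorType.Any pools come back
// taggedPools == ["kvm-pool", "any-pool"]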

Aggregations

StoragePoolVO (org.apache.cloudstack.storage.datastore.db.StoragePoolVO): 276
CloudRuntimeException (com.cloud.utils.exception.CloudRuntimeException): 106
VMTemplateStoragePoolVO (com.cloud.storage.VMTemplateStoragePoolVO): 75
ArrayList (java.util.ArrayList): 54
VolumeVO (com.cloud.storage.VolumeVO): 53
HostVO (com.cloud.host.HostVO): 46
InvalidParameterValueException (com.cloud.exception.InvalidParameterValueException): 45
DataStore (org.apache.cloudstack.engine.subsystem.api.storage.DataStore): 45
HashMap (java.util.HashMap): 44
Answer (com.cloud.agent.api.Answer): 38
VolumeInfo (org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo): 35
StoragePool (com.cloud.storage.StoragePool): 33
Test (org.junit.Test): 33
VMInstanceVO (com.cloud.vm.VMInstanceVO): 25
Map (java.util.Map): 25
Account (com.cloud.user.Account): 24
HypervisorType (com.cloud.hypervisor.Hypervisor.HypervisorType): 20
ExecutionException (java.util.concurrent.ExecutionException): 20
ConcurrentOperationException (com.cloud.exception.ConcurrentOperationException): 19
ClusterVO (com.cloud.dc.ClusterVO): 18