Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeOrchestrator, method prepareForMigration.
@Override
public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) {
    List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Preparing " + vols.size() + " volumes for " + vm);
    }
    for (VolumeVO vol : vols) {
        VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
        DataTO volTO = volumeInfo.getTO();
        DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
        DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
        disk.setDetails(getDetails(volumeInfo, dataStore));
        PrimaryDataStore primaryDataStore = (PrimaryDataStore) dataStore;
        // Granting access here might impact other managed storage types, so grant access for PowerFlex storage pools only
        if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) {
            volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
        }
        vm.addDisk(disk);
    }
    // if (vm.getType() == VirtualMachine.Type.User && vm.getTemplate().getFormat() == ImageFormat.ISO) {
    if (vm.getType() == VirtualMachine.Type.User) {
        _tmpltMgr.prepareIsoForVmProfile(vm, dest);
        // DataTO dataTO = tmplFactory.getTemplate(vm.getTemplate().getId(), DataStoreRole.Image, vm.getVirtualMachine().getDataCenterId()).getTO();
        // DiskTO iso = new DiskTO(dataTO, 3L, null, Volume.Type.ISO);
        // vm.addDisk(iso);
    }
}
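The pattern above resolves each volume's primary DataStore via dataStoreMgr and then grants host access only for managed PowerFlex pools. The following is a minimal sketch of that gating step in isolation; it is illustrative only, the helper name is made up, and it assumes the same injected collaborators (volFactory, dataStoreMgr, volService) used by VolumeOrchestrator above.

    // Hypothetical helper isolating the access-grant gating used in prepareForMigration.
    // Assumes the same injected fields as VolumeOrchestrator (volFactory, dataStoreMgr, volService).
    private void grantAccessIfManagedPowerFlex(VolumeVO vol, Host host) {
        DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
        PrimaryDataStore primaryDataStore = (PrimaryDataStore) dataStore;
        // Only managed PowerFlex pools get an explicit access grant before migration;
        // granting on other managed storage types could have unwanted side effects.
        if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) {
            volService.grantAccess(volFactory.getVolume(vol.getId()), host, dataStore);
        }
    }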
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeOrchestrator, method revokeAccess.
@Override
public void revokeAccess(long vmId, long hostId) {
    HostVO host = _hostDao.findById(hostId);
    List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
    if (volumesForVm != null) {
        for (VolumeVO volumeForVm : volumesForVm) {
            VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId());
            // pool id can be null for the VM's volumes in Allocated state
            if (volumeForVm.getPoolId() != null) {
                DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary);
                volService.revokeAccess(volumeInfo, host, dataStore);
            }
        }
    }
}
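A key detail above is the null-pool guard: only volumes that have actually been placed on a primary storage pool carry a non-null poolId, so volumes still in the Allocated state are skipped. The per-volume sketch below is a hypothetical rearrangement of that same logic, assuming the volFactory, dataStoreMgr, and volService fields shown above.

    // Hypothetical per-volume variant of the loop body in revokeAccess.
    // Assumes the same injected volFactory, dataStoreMgr, and volService fields.
    private void revokeAccessForVolume(VolumeVO volumeForVm, HostVO host) {
        // Volumes in the Allocated state have no primary pool yet, so there is nothing to revoke.
        if (volumeForVm.getPoolId() == null) {
            return;
        }
        VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId());
        DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary);
        volService.revokeAccess(volumeInfo, host, dataStore);
    }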
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeOrchestrator, method migrateVolume.
@Override
@DB
public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
    VolumeInfo vol = volFactory.getVolume(volume.getId());
    if (vol == null) {
        throw new CloudRuntimeException("Migrate volume failed because volume object of volume " + volume.getName() + " is null");
    }
    if (destPool == null) {
        throw new CloudRuntimeException("Migrate volume failed because destination storage pool is not available!");
    }
    checkConcurrentJobsPerDatastoreThreshhold(destPool);
    DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
    AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, dataStoreTarget);
    try {
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            s_logger.error("Migrate volume failed: " + result.getResult());
            if (result.getResult() != null && result.getResult().contains("[UNSUPPORTED]")) {
                throw new CloudRuntimeException("Migrate volume failed: " + result.getResult());
            }
            throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
        } else {
            // update the volumeId for snapshots on secondary storage
            if (!_snapshotDao.listByVolumeId(vol.getId()).isEmpty()) {
                _snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
                _snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
            }
        }
        return result.getVolume();
    } catch (InterruptedException | ExecutionException e) {
        s_logger.debug("Migrate volume failed", e);
        throw new CloudRuntimeException(e.getMessage());
    }
}
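The core of migrateVolume is the asynchronous copy: volService.copyVolume returns an AsyncCallFuture<VolumeApiResult> that the caller blocks on with get() before inspecting the result. The sketch below isolates that pattern; it is not CloudStack code, the method name is hypothetical, and it assumes the same volService field, result types, and a DataStore.getId() accessor for the error path.

    // Hypothetical wrapper around the async copy pattern used in migrateVolume.
    // Assumes the injected volService field and the AsyncCallFuture/VolumeApiResult types above.
    private Volume copyVolumeBlocking(VolumeInfo vol, DataStore dataStoreTarget) throws StorageUnavailableException {
        AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, dataStoreTarget);
        try {
            // Block until the storage subsystem finishes (or fails) the copy.
            VolumeApiResult result = future.get();
            if (result.isFailed()) {
                // Surface the driver's error text; callers decide whether it is retryable.
                throw new StorageUnavailableException("Copy volume failed: " + result.getResult(), dataStoreTarget.getId());
            }
            return result.getVolume();
        } catch (InterruptedException | ExecutionException e) {
            throw new CloudRuntimeException(e.getMessage());
        }
    }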
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class QueryManagerImpl, method searchForStoragePools.
@Override
public ListResponse<StoragePoolResponse> searchForStoragePools(ListStoragePoolsCmd cmd) {
    Pair<List<StoragePoolJoinVO>, Integer> result = searchForStoragePoolsInternal(cmd);
    ListResponse<StoragePoolResponse> response = new ListResponse<StoragePoolResponse>();
    List<StoragePoolResponse> poolResponses = ViewResponseHelper.createStoragePoolResponse(result.first().toArray(new StoragePoolJoinVO[result.first().size()]));
    for (StoragePoolResponse poolResponse : poolResponses) {
        DataStore store = dataStoreManager.getPrimaryDataStore(poolResponse.getId());
        if (store != null) {
            DataStoreDriver driver = store.getDriver();
            if (driver != null && driver.getCapabilities() != null) {
                Map<String, String> caps = driver.getCapabilities();
                if (Storage.StoragePoolType.NetworkFilesystem.toString().equals(poolResponse.getType()) && HypervisorType.VMware.toString().equals(poolResponse.getHypervisor())) {
                    StoragePoolVO pool = _storagePoolDao.findPoolByUUID(poolResponse.getId());
                    StoragePoolDetailVO detail = _storagePoolDetailsDao.findDetail(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString());
                    if (detail != null) {
                        caps.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), detail.getValue());
                    }
                }
                poolResponse.setCaps(caps);
            }
        }
    }
    response.setResponses(poolResponses, result.second());
    return response;
}
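Here the DataStore is used read-only: each pool response is enriched with the driver's capability map, and for NFS pools on VMware the stored HARDWARE_ACCELERATION detail is overlaid on top. A minimal sketch of just the capability lookup follows; the helper name is made up and it assumes the same injected dataStoreManager field used above.

    // Hypothetical helper mirroring the capability lookup in searchForStoragePools.
    // Assumes the injected dataStoreManager field; returns null when the store or driver is unavailable.
    private Map<String, String> getDriverCapabilities(String poolUuid) {
        DataStore store = dataStoreManager.getPrimaryDataStore(poolUuid);
        if (store == null) {
            return null;
        }
        DataStoreDriver driver = store.getDriver();
        if (driver == null) {
            return null;
        }
        // The driver advertises its capabilities (e.g. hardware acceleration) as a plain string map.
        return driver.getCapabilities();
    }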
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class StoragePoolJoinDaoImpl, method newStoragePoolResponse.
@Override
public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool) {
    StoragePool storagePool = storagePoolDao.findById(pool.getId());
    StoragePoolResponse poolResponse = new StoragePoolResponse();
    poolResponse.setId(pool.getUuid());
    poolResponse.setName(pool.getName());
    poolResponse.setState(pool.getStatus());
    String path = pool.getPath();
    // cifs store may contain password entry, remove the password
    path = StringUtils.cleanString(path);
    poolResponse.setPath(path);
    poolResponse.setIpAddress(pool.getHostAddress());
    poolResponse.setZoneId(pool.getZoneUuid());
    poolResponse.setZoneName(pool.getZoneName());
    poolResponse.setType(pool.getPoolType().toString());
    poolResponse.setPodId(pool.getPodUuid());
    poolResponse.setPodName(pool.getPodName());
    poolResponse.setCreated(pool.getCreated());
    if (pool.getScope() != null) {
        poolResponse.setScope(pool.getScope().toString());
    }
    if (pool.getHypervisor() != null) {
        poolResponse.setHypervisor(pool.getHypervisor().toString());
    }
    StoragePoolDetailVO poolType = storagePoolDetailsDao.findDetail(pool.getId(), "pool_type");
    if (poolType != null) {
        poolResponse.setType(poolType.getValue());
    }
    long allocatedSize = pool.getUsedCapacity() + pool.getReservedCapacity();
    if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
        List<StoragePoolVO> childDatastores = storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId());
        if (childDatastores != null) {
            for (StoragePoolVO childDatastore : childDatastores) {
                StoragePoolJoinVO childDSJoinVO = findById(childDatastore.getId());
                allocatedSize += (childDSJoinVO.getUsedCapacity() + childDSJoinVO.getReservedCapacity());
            }
        }
    }
    poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
    poolResponse.setDiskSizeAllocated(allocatedSize);
    poolResponse.setCapacityIops(pool.getCapacityIops());
    if (storagePool.isManaged()) {
        DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
        PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver) store.getDriver();
        long usedIops = driver.getUsedIops(storagePool);
        poolResponse.setAllocatedIops(usedIops);
    }
    // TODO: StatsCollector does not persist data
    StorageStats stats = ApiDBUtils.getStoragePoolStatistics(pool.getId());
    if (stats != null) {
        Long used = stats.getByteUsed();
        poolResponse.setDiskSizeUsed(used);
    }
    poolResponse.setClusterId(pool.getClusterUuid());
    poolResponse.setClusterName(pool.getClusterName());
    poolResponse.setProvider(pool.getStorageProviderName());
    poolResponse.setTags(pool.getTag());
    poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
    // set async job
    if (pool.getJobId() != null) {
        poolResponse.setJobId(pool.getJobUuid());
        poolResponse.setJobStatus(pool.getJobStatus());
    }
    poolResponse.setHasAnnotation(annotationDao.hasAnnotations(pool.getUuid(), AnnotationService.EntityType.PRIMARY_STORAGE.name(), accountManager.isRootAdmin(CallContext.current().getCallingAccount().getId())));
    poolResponse.setObjectName("storagepool");
    return poolResponse;
}
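Two points in this builder are worth isolating: for DatastoreCluster pools the allocated size is the parent's used plus reserved capacity summed with the same figures from every child datastore, and for managed pools the DataStore is resolved solely to ask its PrimaryDataStoreDriver for used IOPS. The sketch below extracts the allocation roll-up; it is illustrative only, the method name is hypothetical, and it assumes the same storagePoolDao field and this DAO's findById accessor.

    // Hypothetical extraction of the DatastoreCluster allocation roll-up in newStoragePoolResponse.
    // Assumes the same storagePoolDao field and this DAO's findById(joinVoId) method.
    private long computeAllocatedSize(StoragePoolJoinVO pool) {
        long allocatedSize = pool.getUsedCapacity() + pool.getReservedCapacity();
        if (pool.getPoolType() != Storage.StoragePoolType.DatastoreCluster) {
            return allocatedSize;
        }
        List<StoragePoolVO> childDatastores = storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId());
        if (childDatastores != null) {
            for (StoragePoolVO childDatastore : childDatastores) {
                // Each child datastore contributes its own used + reserved capacity to the cluster total.
                StoragePoolJoinVO childDSJoinVO = findById(childDatastore.getId());
                allocatedSize += childDSJoinVO.getUsedCapacity() + childDSJoinVO.getReservedCapacity();
            }
        }
        return allocatedSize;
    }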