Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
From the class SolidFirePrimaryDataStoreLifeCycle, method deleteDataStore:
// invoked to delete primary storage that is based on the SolidFire plug-in
@Override
public boolean deleteDataStore(DataStore dataStore) {
    long storagePoolId = dataStore.getId();

    // refuse to delete the pool while any snapshot still resides on it
    List<SnapshotVO> lstSnapshots = _snapshotDao.listAll();
    if (lstSnapshots != null) {
        for (SnapshotVO snapshot : lstSnapshots) {
            SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.STORAGE_POOL_ID);
            // if this snapshot belongs to the storage pool that was passed in
            if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePoolId) {
                throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more snapshots.");
            }
        }
    }

    // delete the SolidFire volumes backing cached templates, then drop the template/pool refs
    List<VMTemplateStoragePoolVO> lstTemplatePoolRefs = _tmpltPoolDao.listByPoolId(storagePoolId);
    if (lstTemplatePoolRefs != null) {
        for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) {
            try {
                SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao);
                long sfTemplateVolumeId = Long.parseLong(templatePoolRef.getLocalDownloadPath());

                SolidFireUtil.deleteVolume(sfConnection, sfTemplateVolumeId);
            } catch (Exception ex) {
                s_logger.error(ex.getMessage() != null ? ex.getMessage() : "Error deleting SolidFire template volume");
            }

            _tmpltPoolDao.remove(templatePoolRef.getId());
        }
    }

    StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
    storagePool.setUsedBytes(0);

    _storagePoolDao.update(storagePoolId, storagePool);
    _storagePoolDetailsDao.removeDetails(storagePoolId);

    return _dataStoreHelper.deletePrimaryDataStore(dataStore);
}
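A unit test for the snapshot guard above might look like the following Mockito sketch. This is illustrative only: the test class, the lifeCycle instance under test, the injected DAO mocks, and the usual JUnit 4/Mockito static imports (including java.util.Collections) are all assumed, with field names mirroring the ones used in the method.

// Hedged sketch: verifies deleteDataStore refuses to delete a pool that still holds a snapshot.
@Test(expected = CloudRuntimeException.class)
public void deleteDataStoreRefusesWhenPoolHasSnapshots() {
    DataStore store = mock(DataStore.class);
    when(store.getId()).thenReturn(42L);

    SnapshotVO snapshot = mock(SnapshotVO.class);
    when(snapshot.getId()).thenReturn(1L);
    when(_snapshotDao.listAll()).thenReturn(Collections.singletonList(snapshot));

    // the detail row pins snapshot 1 to pool 42, so the guard should fire
    SnapshotDetailsVO detail = mock(SnapshotDetailsVO.class);
    when(detail.getValue()).thenReturn("42");
    when(_snapshotDetailsDao.findDetail(1L, SolidFireUtil.STORAGE_POOL_ID)).thenReturn(detail);

    lifeCycle.deleteDataStore(store); // expected to throw before any template volume is touched
}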
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
From the class LinstorPrimaryDataStoreLifeCycleImpl, method updateStoragePool:
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
    StoragePoolVO storagePoolVo = _primaryDataStoreDao.findById(storagePool.getId());

    // reject a capacity-bytes value smaller than what the pool already uses
    String strCapacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES);
    Long capacityBytes = strCapacityBytes != null ? Long.parseLong(strCapacityBytes) : null;
    if (capacityBytes != null) {
        long usedBytes = _capacityMgr.getUsedBytes(storagePoolVo);
        if (capacityBytes < usedBytes) {
            throw new CloudRuntimeException("Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes");
        }
    }

    // the same check for capacity IOPS
    String strCapacityIops = details.get(PrimaryDataStoreLifeCycle.CAPACITY_IOPS);
    Long capacityIops = strCapacityIops != null ? Long.parseLong(strCapacityIops) : null;
    if (capacityIops != null) {
        long usedIops = _capacityMgr.getUsedIops(storagePoolVo);
        if (capacityIops < usedIops) {
            throw new CloudRuntimeException("Cannot reduce the number of IOPS for this storage pool as it would lead to an insufficient number of IOPS");
        }
    }
}
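Callers hand the new limits to updateStoragePool through the details map, keyed by the PrimaryDataStoreLifeCycle constants used above. A minimal usage sketch follows; the storagePool and lifeCycle references are assumed to exist and are not part of the snippet above.

// Illustrative invocation: shrink requests below current usage will throw.
Map<String, String> details = new HashMap<>();
details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, String.valueOf(10L * 1024 * 1024 * 1024)); // 10 GiB
details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, "5000");

// throws CloudRuntimeException if either value is below what the pool already uses
lifeCycle.updateStoragePool(storagePool, details);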
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
From the class ScaleIOPrimaryDataStoreLifeCycleTest, method testDeleteDataStore:
@Test
public void testDeleteDataStore() {
    final PrimaryDataStore store = mock(PrimaryDataStore.class);
    final StoragePoolVO storagePoolVO = mock(StoragePoolVO.class);
    when(primaryDataStoreDao.findById(anyLong())).thenReturn(storagePoolVO);

    List<VMTemplateStoragePoolVO> unusedTemplates = new ArrayList<>();
    when(templateMgr.getUnusedTemplatesInPool(storagePoolVO)).thenReturn(unusedTemplates);

    List<StoragePoolHostVO> poolHostVOs = new ArrayList<>();
    when(storagePoolHostDao.listByPoolId(anyLong())).thenReturn(poolHostVOs);

    when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(true);

    final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store);
    assertThat(result).isTrue();
}
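A natural companion would cover the failure path, where the data-store helper reports that the delete did not succeed. The sketch below reuses the same mocks but is not part of the project's test class; the method name testDeleteDataStoreFailure is hypothetical, and it assumes deleteDataStore propagates the helper's return value, as the passing test above suggests.

@Test
public void testDeleteDataStoreFailure() {
    final PrimaryDataStore store = mock(PrimaryDataStore.class);
    final StoragePoolVO storagePoolVO = mock(StoragePoolVO.class);
    when(primaryDataStoreDao.findById(anyLong())).thenReturn(storagePoolVO);
    when(templateMgr.getUnusedTemplatesInPool(storagePoolVO)).thenReturn(new ArrayList<>());
    when(storagePoolHostDao.listByPoolId(anyLong())).thenReturn(new ArrayList<>());

    // this time the helper reports the delete failed
    when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(false);

    assertThat(scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store)).isFalse();
}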
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
From the class ScaleIOPrimaryDataStoreLifeCycle, method updateStoragePool:
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
    String capacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES);
    StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(storagePool.getId());
    try {
        // nothing to do if no new capacity was supplied
        if (capacityBytes == null || capacityBytes.isBlank()) {
            return;
        }

        long usedBytes = capacityMgr.getUsedBytes(storagePoolVO);
        if (Long.parseLong(capacityBytes) < usedBytes) {
            throw new CloudRuntimeException("Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes");
        }

        primaryDataStoreDao.updateCapacityBytes(storagePool.getId(), Long.parseLong(capacityBytes));
        LOGGER.info("Storage pool successfully updated");
    } catch (Throwable e) {
        throw new CloudRuntimeException("Failed to update the storage pool: " + e);
    }
}
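One small wrinkle: Long.parseLong(capacityBytes) runs twice on the same string. A behavior-preserving variant that parses once could look like this (a sketch, not the project's code):

long newCapacityBytes = Long.parseLong(capacityBytes); // parse once, reuse below
long usedBytes = capacityMgr.getUsedBytes(storagePoolVO);
if (newCapacityBytes < usedBytes) {
    throw new CloudRuntimeException("Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes");
}
primaryDataStoreDao.updateCapacityBytes(storagePool.getId(), newCapacityBytes);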
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
From the class ScaleIOPrimaryDataStoreLifeCycle, method initialize:
@SuppressWarnings("unchecked")
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String dataStoreName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get("capacityIops");
    String tags = (String) dsInfos.get("tags");
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");

    if (zoneId == null) {
        throw new CloudRuntimeException("Zone Id must be specified.");
    }

    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    if (clusterId != null) {
        // Primary data store is cluster-wide; check and set the podId and clusterId parameters
        if (podId == null) {
            throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage.");
        }

        Hypervisor.HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
        if (!isSupportedHypervisorType(hypervisorType)) {
            throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString());
        }

        parameters.setPodId(podId);
        parameters.setClusterId(clusterId);
    } else if (podId != null) {
        throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage.");
    }

    // validate the URL scheme: powerflex://username:password@gatewayhost/pool
    URI uri = null;
    try {
        uri = new URI(UriUtils.encodeURIComponent(url));
        if (uri.getScheme() == null || !uri.getScheme().equalsIgnoreCase("powerflex")) {
            throw new InvalidParameterValueException("scheme is invalid for url: " + url + ", should be powerflex://username:password@gatewayhost/pool");
        }
    } catch (Exception ignored) {
        throw new InvalidParameterValueException(url + " is not a valid uri");
    }

    String storagePoolName = null;
    try {
        storagePoolName = URLDecoder.decode(uri.getPath(), "UTF-8");
    } catch (UnsupportedEncodingException e) {
        LOGGER.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e);
    }
    if (storagePoolName == null) {
        // if decoding fails, use getPath() anyway
        storagePoolName = uri.getPath();
    }
    storagePoolName = storagePoolName.replaceFirst("/", "");

    // derive the gateway API endpoint and credentials from the URL
    final String storageHost = uri.getHost();
    final int port = uri.getPort();
    String gatewayApiURL = null;
    if (port == -1) {
        gatewayApiURL = String.format("https://%s/api", storageHost);
    } else {
        gatewayApiURL = String.format("https://%s:%d/api", storageHost, port);
    }

    final String userInfo = uri.getUserInfo();
    final String gatewayUsername = userInfo.split(":")[0];
    final String gatewayPassword = userInfo.split(":")[1];

    // refuse to add the same PowerFlex pool (same gateway endpoint and pool name) twice
    List<StoragePoolVO> storagePoolVO = primaryDataStoreDao.findPoolsByProvider(ScaleIOUtil.PROVIDER_NAME);
    if (CollectionUtils.isNotEmpty(storagePoolVO)) {
        for (StoragePoolVO poolVO : storagePoolVO) {
            Map<String, String> poolDetails = primaryDataStoreDao.getDetails(poolVO.getId());
            String poolUrl = poolDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT);
            String poolName = poolDetails.get(ScaleIOGatewayClient.STORAGE_POOL_NAME);
            if (gatewayApiURL.equals(poolUrl) && storagePoolName.equals(poolName)) {
                throw new IllegalArgumentException("PowerFlex storage pool: " + storagePoolName + " already exists, please specify another storage pool.");
            }
        }
    }

    final org.apache.cloudstack.storage.datastore.api.StoragePool scaleIOPool = this.findStoragePool(gatewayApiURL, gatewayUsername, gatewayPassword, storagePoolName);

    parameters.setZoneId(zoneId);
    parameters.setName(dataStoreName);
    parameters.setProviderName(providerName);
    parameters.setManaged(true);
    parameters.setHost(storageHost);
    parameters.setPath(scaleIOPool.getId());
    parameters.setUserInfo(userInfo);
    parameters.setType(Storage.StoragePoolType.PowerFlex);
    parameters.setHypervisorType(Hypervisor.HypervisorType.KVM);
    parameters.setUuid(UUID.randomUUID().toString());
    parameters.setTags(tags);

    // seed capacity/usage from the pool statistics, allowing explicit overrides
    StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics();
    if (poolStatistics != null) {
        if (capacityBytes == null) {
            parameters.setCapacityBytes(poolStatistics.getNetMaxCapacityInBytes());
        }
        parameters.setUsedBytes(poolStatistics.getNetUsedCapacityInBytes());
    }
    if (capacityBytes != null) {
        parameters.setCapacityBytes(capacityBytes);
    }
    if (capacityIops != null) {
        parameters.setCapacityIops(capacityIops);
    }

    // persist gateway connection details (credentials encrypted at rest)
    details.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, gatewayApiURL);
    details.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, DBEncryptionUtil.encrypt(gatewayUsername));
    details.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, DBEncryptionUtil.encrypt(gatewayPassword));
    details.put(ScaleIOGatewayClient.STORAGE_POOL_NAME, storagePoolName);
    details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, scaleIOPool.getSystemId());
    parameters.setDetails(details);

    return dataStoreHelper.createPrimaryDataStore(parameters);
}
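The URL that initialize expects has the shape powerflex://username:password@gatewayhost/pool. The following standalone sketch reproduces the same parsing steps with plain JDK classes; it skips the UriUtils.encodeURIComponent call and the CloudStack helpers, and the credentials and host are made up for illustration.

import java.net.URI;

public class PowerFlexUrlDemo {
    public static void main(String[] args) throws Exception {
        URI uri = new URI("powerflex://admin:secret@gateway.example.com:443/pool1");

        String storagePoolName = uri.getPath().replaceFirst("/", ""); // "pool1"
        String host = uri.getHost();                                  // "gateway.example.com"
        int port = uri.getPort();                                     // 443

        String gatewayApiURL = (port == -1)
                ? String.format("https://%s/api", host)
                : String.format("https://%s:%d/api", host, port);     // "https://gateway.example.com:443/api"

        String[] userInfo = uri.getUserInfo().split(":");             // ["admin", "secret"]

        System.out.println(storagePoolName + " @ " + gatewayApiURL + " as user " + userInfo[0]);
    }
}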