Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeDataFactoryImpl, method listVolumeOnCache.
@Override
public List<VolumeInfo> listVolumeOnCache(long volumeId) {
    List<VolumeInfo> cacheVols = new ArrayList<VolumeInfo>();
    // find all image cache stores for this zone scope
    List<DataStore> cacheStores = storeMgr.listImageCacheStores();
    if (cacheStores == null || cacheStores.size() == 0) {
        return cacheVols;
    }
    for (DataStore store : cacheStores) {
        // check if the volume is stored there
        VolumeDataStoreVO volStore = volumeStoreDao.findByStoreVolume(store.getId(), volumeId);
        if (volStore != null) {
            VolumeInfo vol = getVolume(volumeId, store);
            cacheVols.add(vol);
        }
    }
    return cacheVols;
}
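To show how a caller might use listVolumeOnCache, here is a minimal hypothetical sketch: before copying a volume across stores, check whether an image-cache copy already exists and prefer it as the source. The volFactory field stands for an injected VolumeDataFactory and is an assumption, not part of the listing above.

    // Hypothetical caller: prefer a cached copy of the volume if one exists.
    List<VolumeInfo> cached = volFactory.listVolumeOnCache(volumeId);
    if (!cached.isEmpty()) {
        VolumeInfo srcVol = cached.get(0);
        s_logger.debug("Using cached copy of volume " + volumeId + " on store " + srcVol.getDataStore().getId());
    }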
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeDataStoreDaoImpl, method duplicateCacheRecordsOnRegionStore.
@Override
public void duplicateCacheRecordsOnRegionStore(long storeId) {
    // find all records on image cache stores
    List<DataStore> cacheStores = storeMgr.listImageCacheStores();
    if (cacheStores == null || cacheStores.size() == 0) {
        return;
    }
    List<VolumeDataStoreVO> vols = new ArrayList<VolumeDataStoreVO>();
    for (DataStore store : cacheStores) {
        // collect all volume records kept on this cache store
        vols.addAll(listByStoreId(store.getId()));
    }
    // create an entry for each record, but with an empty install path since the content is not yet on the region-wide store
    s_logger.info("Duplicate " + vols.size() + " volume cache store records to region store");
    for (VolumeDataStoreVO vol : vols) {
        VolumeDataStoreVO volStore = findByStoreVolume(storeId, vol.getVolumeId());
        if (volStore != null) {
            s_logger.info("There is already an entry for volume " + vol.getVolumeId() + " on region store " + storeId);
            continue;
        }
        s_logger.info("Persisting an entry for volume " + vol.getVolumeId() + " on region store " + storeId);
        VolumeDataStoreVO vs = new VolumeDataStoreVO();
        vs.setVolumeId(vol.getVolumeId());
        vs.setDataStoreId(storeId);
        vs.setState(vol.getState());
        vs.setDownloadPercent(vol.getDownloadPercent());
        vs.setDownloadState(vol.getDownloadState());
        vs.setSize(vol.getSize());
        vs.setPhysicalSize(vol.getPhysicalSize());
        vs.setErrorString(vol.getErrorString());
        vs.setRefCnt(vol.getRefCnt());
        persist(vs);
        // increase ref_cnt so that the cache entry will not be recycled before the content is pushed to the region-wide store
        vol.incrRefCnt();
        this.update(vol.getId(), vol);
    }
}
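The incrRefCnt()/update() pair at the end pins each cache record until the content actually reaches the region-wide store. A matching release step would decrement the count once the copy completes; the sketch below is hypothetical (the helper releaseCacheRecord is not part of the DAO) and is shown only to illustrate the pairing.

    // Hypothetical counterpart: once the content is on the region-wide store,
    // drop the extra reference so the cache entry can be recycled again.
    void releaseCacheRecord(VolumeDataStoreDao volumeStoreDao, long cacheStoreId, long volumeId) {
        VolumeDataStoreVO cacheEntry = volumeStoreDao.findByStoreVolume(cacheStoreId, volumeId);
        if (cacheEntry != null && cacheEntry.getRefCnt() > 0) {
            cacheEntry.decrRefCnt(); // undo the incrRefCnt() taken during duplication
            volumeStoreDao.update(cacheEntry.getId(), cacheEntry);
        }
    }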
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class SolidFireSharedPrimaryDataStoreLifeCycle, method initialize.
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    final String CAPACITY_IOPS = "capacityIops";
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get(CAPACITY_IOPS);
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked")
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    if (podId == null) {
        throw new CloudRuntimeException("The Pod ID must be specified.");
    }
    if (clusterId == null) {
        throw new CloudRuntimeException("The Cluster ID must be specified.");
    }
    String storageVip = SolidFireUtil.getStorageVip(url);
    int storagePort = SolidFireUtil.getStoragePort(url);
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
    if (!isSupportedHypervisorType(hypervisorType)) {
        throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
    }
    String datacenter = SolidFireUtil.getValue(SolidFireUtil.DATACENTER, url, false);
    if (HypervisorType.VMware.equals(hypervisorType) && datacenter == null) {
        throw new CloudRuntimeException("'Datacenter' must be set for hypervisor type of " + HypervisorType.VMware);
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setType(getStorageType(hypervisorType));
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setClusterId(clusterId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(false);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(hypervisorType);
    parameters.setTags(tags);
    parameters.setDetails(details);
    String managementVip = SolidFireUtil.getManagementVip(url);
    int managementPort = SolidFireUtil.getManagementPort(url);
    details.put(SolidFireUtil.MANAGEMENT_VIP, managementVip);
    details.put(SolidFireUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
    String clusterAdminUsername = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_USERNAME, url);
    String clusterAdminPassword = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, url);
    details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
    details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
    long lMinIops = 100;
    long lMaxIops = 15000;
    long lBurstIops = 15000;
    try {
        String minIops = SolidFireUtil.getValue(SolidFireUtil.MIN_IOPS, url);
        if (minIops != null && minIops.trim().length() > 0) {
            lMinIops = Long.parseLong(minIops);
        }
    } catch (Exception ex) {
        s_logger.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String maxIops = SolidFireUtil.getValue(SolidFireUtil.MAX_IOPS, url);
        if (maxIops != null && maxIops.trim().length() > 0) {
            lMaxIops = Long.parseLong(maxIops);
        }
    } catch (Exception ex) {
        s_logger.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String burstIops = SolidFireUtil.getValue(SolidFireUtil.BURST_IOPS, url);
        if (burstIops != null && burstIops.trim().length() > 0) {
            lBurstIops = Long.parseLong(burstIops);
        }
    } catch (Exception ex) {
        s_logger.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage());
    }
    if (lMinIops > lMaxIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MIN_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.MAX_IOPS + "'.");
    }
    if (lMaxIops > lBurstIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MAX_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.BURST_IOPS + "'.");
    }
    if (lMinIops != capacityIops) {
        throw new CloudRuntimeException("The parameter '" + CAPACITY_IOPS + "' must be equal to the parameter '" + SolidFireUtil.MIN_IOPS + "'.");
    }
    if (lMinIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
    }
    details.put(SolidFireUtil.MIN_IOPS, String.valueOf(lMinIops));
    details.put(SolidFireUtil.MAX_IOPS, String.valueOf(lMaxIops));
    details.put(SolidFireUtil.BURST_IOPS, String.valueOf(lBurstIops));
    SolidFireUtil.SolidFireConnection sfConnection = new SolidFireUtil.SolidFireConnection(managementVip, managementPort, clusterAdminUsername, clusterAdminPassword);
    SolidFireCreateVolume sfCreateVolume = createSolidFireVolume(sfConnection, storagePoolName, capacityBytes, lMinIops, lMaxIops, lBurstIops);
    SolidFireUtil.SolidFireVolume sfVolume = sfCreateVolume.getVolume();
    String iqn = sfVolume.getIqn();
    details.put(SolidFireUtil.VOLUME_ID, String.valueOf(sfVolume.getId()));
    parameters.setUuid(iqn);
    if (HypervisorType.VMware.equals(hypervisorType)) {
        String datastore = iqn.replace("/", "_");
        String path = "/" + datacenter + "/" + datastore;
        parameters.setHost("VMFS datastore: " + path);
        parameters.setPort(0);
        parameters.setPath(path);
        details.put(SolidFireUtil.DATASTORE_NAME, datastore);
        details.put(SolidFireUtil.IQN, iqn);
        details.put(SolidFireUtil.STORAGE_VIP, storageVip);
        details.put(SolidFireUtil.STORAGE_PORT, String.valueOf(storagePort));
    } else {
        parameters.setHost(storageVip);
        parameters.setPort(storagePort);
        parameters.setPath(iqn);
    }
    ClusterVO cluster = _clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
        String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
        s_logger.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    DataStore dataStore = null;
    try {
        // this adds a row in the cloud.storage_pool table for this SolidFire volume
        dataStore = _primaryDataStoreHelper.createPrimaryDataStore(parameters);
        // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group,
        // if need be, and place the newly created volume in the Volume Access Group
        List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
        SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolume.getId(), dataStore.getId(), cluster.getUuid(), hosts, _clusterDetailsDao);
        SolidFireUtil.SolidFireAccount sfAccount = sfCreateVolume.getAccount();
        Account csAccount = CallContext.current().getCallingAccount();
        SolidFireUtil.updateCsDbWithSolidFireAccountInfo(csAccount.getId(), sfAccount, dataStore.getId(), _accountDetailsDao);
    } catch (Exception ex) {
        // guard against an NPE when createPrimaryDataStore itself failed and dataStore is still null
        if (dataStore != null) {
            _primaryDataStoreDao.expunge(dataStore.getId());
        }
        throw new CloudRuntimeException(ex.getMessage());
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
    return dataStore;
}
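The three try/catch blocks that parse Min, Max, and Burst IOPS are structurally identical, so they could be collapsed into a helper. The sketch below is an editorial refactoring, not code from the plug-in; parseIopsOrDefault is a hypothetical name, and the body relies only on the SolidFireUtil.getValue call already used above.

    // Hypothetical helper collapsing the three IOPS-parsing blocks above.
    private long parseIopsOrDefault(String key, String url, long defaultValue) {
        try {
            String value = SolidFireUtil.getValue(key, url);
            if (value != null && value.trim().length() > 0) {
                return Long.parseLong(value);
            }
        } catch (Exception ex) {
            s_logger.info("[ignored] error getting " + key + ": " + ex.getLocalizedMessage());
        }
        return defaultValue;
    }

With it, each default becomes a one-liner, e.g. long lMinIops = parseIopsOrDefault(SolidFireUtil.MIN_IOPS, url, 100);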
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class QueryManagerImpl, method searchForStoragePools.
@Override
public ListResponse<StoragePoolResponse> searchForStoragePools(ListStoragePoolsCmd cmd) {
    Pair<List<StoragePoolJoinVO>, Integer> result = searchForStoragePoolsInternal(cmd);
    ListResponse<StoragePoolResponse> response = new ListResponse<StoragePoolResponse>();
    List<StoragePoolResponse> poolResponses = ViewResponseHelper.createStoragePoolResponse(result.first().toArray(new StoragePoolJoinVO[result.first().size()]));
    for (StoragePoolResponse poolResponse : poolResponses) {
        // enrich each response with the capabilities of the driver backing the primary store
        DataStore store = dataStoreManager.getPrimaryDataStore(poolResponse.getId());
        if (store != null) {
            DataStoreDriver driver = store.getDriver();
            if (driver != null && driver.getCapabilities() != null) {
                poolResponse.setCaps(driver.getCapabilities());
            }
        }
    }
    response.setResponses(poolResponses, result.second());
    return response;
}
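DataStoreDriver.getCapabilities() returns a map of capability flags, which is what setCaps forwards to the API response. How a client might read one such flag off the response is sketched below; both the getCaps() accessor and the capability key are assumptions for illustration, since key names vary by storage driver.

    // Illustrative only: inspect one capability flag on a pool response.
    Map<String, String> caps = poolResponse.getCaps(); // assumed accessor
    if (caps != null && Boolean.parseBoolean(caps.get("VOLUME_SNAPSHOT_QUIESCEVM"))) {
        // the backing driver advertises quiesced volume snapshots (key name is an assumption)
    }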
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class ObjectInDataStoreManagerImpl, method delete.
@Override
public boolean delete(DataObject dataObj) {
    long objId = dataObj.getId();
    DataStore dataStore = dataObj.getDataStore();
    if (dataStore.getRole() == DataStoreRole.Primary) {
        if (dataObj.getType() == DataObjectType.TEMPLATE) {
            VMTemplateStoragePoolVO destTmpltPool = templatePoolDao.findByPoolTemplate(dataStore.getId(), objId);
            if (destTmpltPool != null) {
                return templatePoolDao.remove(destTmpltPool.getId());
            } else {
                s_logger.warn("Template " + objId + " is not found on storage pool " + dataStore.getId() + ", so no need to delete");
                return true;
            }
        }
    } else {
        // Image store
        switch (dataObj.getType()) {
            case TEMPLATE:
                TemplateDataStoreVO destTmpltStore = templateDataStoreDao.findByStoreTemplate(dataStore.getId(), objId);
                if (destTmpltStore != null) {
                    return templateDataStoreDao.remove(destTmpltStore.getId());
                } else {
                    s_logger.warn("Template " + objId + " is not found on image store " + dataStore.getId() + ", so no need to delete");
                    return true;
                }
            case SNAPSHOT:
                SnapshotDataStoreVO destSnapshotStore = snapshotDataStoreDao.findByStoreSnapshot(dataStore.getRole(), dataStore.getId(), objId);
                if (destSnapshotStore != null) {
                    return snapshotDataStoreDao.remove(destSnapshotStore.getId());
                } else {
                    s_logger.warn("Snapshot " + objId + " is not found on image store " + dataStore.getId() + ", so no need to delete");
                    return true;
                }
            case VOLUME:
                VolumeDataStoreVO destVolumeStore = volumeDataStoreDao.findByStoreVolume(dataStore.getId(), objId);
                if (destVolumeStore != null) {
                    return volumeDataStoreDao.remove(destVolumeStore.getId());
                } else {
                    s_logger.warn("Volume " + objId + " is not found on image store " + dataStore.getId() + ", so no need to delete");
                    return true;
                }
        }
    }
    s_logger.warn("Unsupported data object (" + dataObj.getType() + ", " + dataObj.getDataStore() + ")");
    return false;
}
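A typical caller obtains the DataObject from one of the data factories and then asks this manager to drop the tracking row. The sketch below is hypothetical: templateFactory and objectInDataStoreMgr stand for injected TemplateDataFactory and ObjectInDataStoreManager dependencies, which are not part of the listing.

    // Hypothetical caller: remove the DB row tracking 'template' on 'imageStore'.
    TemplateInfo template = templateFactory.getTemplate(templateId, imageStore);
    if (template != null && objectInDataStoreMgr.delete(template)) {
        s_logger.info("Removed reference for template " + templateId + " on store " + imageStore.getId());
    }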