use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class DateraPrimaryDataStoreDriver method deleteSnapshot.
/**
 * Deletes a snapshot on Datera.
 * @param snapshotInfo  snapshot information
 * @param storagePoolId ID of the primary storage pool
 * @throws UnsupportedEncodingException
 * @throws DateraObject.DateraError
 */
private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError {
    long csSnapshotId = snapshotInfo.getId();
    try {
        DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
        SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID);
        if (snapshotDetails != null && snapshotDetails.getValue() != null) {
            // A native Datera snapshot is being used; delete it
            String snapshotName = snapshotDetails.getValue();
            DateraUtil.deleteVolumeSnapshot(conn, snapshotName);
            // Check whether the underlying volume needs to be deleted as well
            SnapshotVO snapshot = _snapshotDao.findById(csSnapshotId);
            VolumeVO volume = _volumeDao.findById(snapshot.getVolumeId());
            if (volume == null) {
                // The volume has been deleted from CloudStack; check whether other snapshots still use it
                volume = _volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId());
                if (shouldDeleteVolume(snapshot.getVolumeId(), snapshot.getId())) {
                    DateraUtil.deleteAppInstance(conn, volume.getFolder());
                }
            }
        } else {
            // An app instance is being used to back the CloudStack volume snapshot; delete it
            snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.VOLUME_ID);
            String appInstanceName = snapshotDetails.getValue();
            DateraUtil.deleteAppInstance(conn, appInstanceName);
        }
        snapshotDetailsDao.removeDetails(csSnapshotId);
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        // getUsedBytes(StoragePool) will not include the snapshot being deleted because it
        // has already been removed by this point
        long usedBytes = getUsedBytes(storagePool);
        storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
        storagePoolDao.update(storagePoolId, storagePool);
    } catch (Exception ex) {
        s_logger.debug("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId, ex);
        throw ex;
    }
}
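The snapshot path above defers the volume cleanup to a shouldDeleteVolume(...) helper that is not included in this excerpt. A minimal sketch of what such a check could look like, assuming a snapshot DAO lookup by volume ID and the same DateraUtil.SNAPSHOT_ID detail key; this is an illustration, not the driver's actual implementation:
// Illustrative sketch only: returns true when no other snapshot still depends
// on the Datera volume backing csVolumeId.
private boolean shouldDeleteVolume(Long csVolumeId, Long snapshotToIgnoreId) {
    List<SnapshotVO> snapshots = _snapshotDao.listByVolumeId(csVolumeId); // assumed DAO lookup
    if (snapshots == null) {
        return true;
    }
    for (SnapshotVO snapshot : snapshots) {
        if (snapshotToIgnoreId != null && snapshot.getId() == snapshotToIgnoreId) {
            continue; // skip the snapshot currently being deleted
        }
        SnapshotDetailsVO details = snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.SNAPSHOT_ID);
        if (details != null && details.getValue() != null) {
            return false; // another native snapshot still lives on this volume
        }
    }
    return true;
}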
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class DateraPrimaryDataStoreDriver method createTemplateVolume.
/**
 * This function gets invoked when we want to create a volume that caches the
 * template on the primary storage. This 'template volume' will then be cloned
 * to create new ROOT volumes.
 *
 * @param templateInfo  information about the template, such as its ID and size
 * @param storagePoolId the primary store to create this volume on
 * @return IQN of the template volume
 */
public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) {
    s_logger.debug("createTemplateVolume() as cache template called");
    verifySufficientBytesForStoragePool(templateInfo, storagePoolId);
    DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
    String iqn = null;
    String appInstanceName = null;
    try {
        long templateSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePoolDao.findById(storagePoolId));
        s_logger.debug("cached VM template sizeBytes: " + toHumanReadableSize(templateSizeBytes));
        int templateSizeGib = DateraUtil.bytesToGib(templateSizeBytes);
        int templateIops = DateraUtil.MAX_IOPS;
        int replicaCount = getNumReplicas(storagePoolId);
        appInstanceName = getAppInstanceName(templateInfo);
        String volumePlacement = getVolPlacement(storagePoolId);
        String ipPool = getIpPool(storagePoolId);
        s_logger.debug("cached VM template app_instance: " + appInstanceName + " ipPool: " + ipPool + " sizeGib: " + String.valueOf(templateSizeGib));
        DateraObject.AppInstance appInstance = DateraUtil.createAppInstance(conn, appInstanceName, templateSizeGib, templateIops, replicaCount, volumePlacement, ipPool);
        if (appInstance == null) {
            throw new CloudRuntimeException("Unable to create template volume " + templateInfo.getId());
        }
        iqn = appInstance.getIqn();
        VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, templateInfo.getId(), null);
        templatePoolRef.setInstallPath(DateraUtil.generateIqnPath(iqn));
        templatePoolRef.setLocalDownloadPath(appInstance.getName());
        templatePoolRef.setTemplateSize(DateraUtil.gibToBytes(appInstance.getSize()));
        tmpltPoolDao.update(templatePoolRef.getId(), templatePoolRef);
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        long capacityBytes = storagePool.getCapacityBytes();
        long usedBytes = getUsedBytes(storagePool);
        storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
        storagePoolDao.update(storagePoolId, storagePool);
    } catch (UnsupportedEncodingException | DateraObject.DateraError dateraError) {
        if (DateraObject.DateraErrorTypes.ConflictError.equals(dateraError)) {
            String errMesg = "Template app instance " + appInstanceName + " already exists";
            s_logger.debug(errMesg, dateraError);
        } else {
            String errMesg = "Unable to create template app instance: " + dateraError.getMessage();
            s_logger.error(errMesg, dateraError);
            throw new CloudRuntimeException(errMesg, dateraError);
        }
    }
    return DateraUtil.generateIqnPath(iqn);
}
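Two small calculations carry this method's bookkeeping: the template size is rounded up to whole GiB before the app instance is created, and the pool's used bytes are clamped to its capacity afterwards. A self-contained sketch of both, with hypothetical helper names standing in for DateraUtil.bytesToGib/gibToBytes (whose exact rounding is not shown in this excerpt):
// Illustrative sketch of the size conversion and usage-clamping logic.
public class PoolUsageSketch {
    static final long GIB = 1024L * 1024L * 1024L;

    // Round up to whole GiB; a SAN typically provisions whole-GiB volumes (assumption).
    static int bytesToGib(long bytes) {
        return (int) ((bytes + GIB - 1) / GIB);
    }

    static long gibToBytes(int gib) {
        return gib * GIB;
    }

    // Never report less than zero or more used capacity than the pool actually has.
    static long clampUsedBytes(long usedBytes, long capacityBytes) {
        return Math.min(Math.max(usedBytes, 0), capacityBytes);
    }

    public static void main(String[] args) {
        long templateSizeBytes = 5L * GIB + 123;
        System.out.println("GiB to provision: " + bytesToGib(templateSizeBytes)); // prints 6
        System.out.println("Clamped usage: " + clampUsedBytes(7L * GIB, 6L * GIB)); // prints the capacity
    }
}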
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class DateraPrimaryDataStoreDriver method deleteVolume.
/**
 * Deletes a volume from Datera. If native snapshots are in use, we first check
 * whether the volume is holding a native snapshot; if it is, we do not delete
 * it from Datera but instead mark it so that the volume is deleted when the
 * snapshot is deleted.
 *
 * @param volumeInfo    The volume which needs to be deleted
 * @param storagePoolId Primary storage where the volume resides
 */
private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) {
    DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
    Long volumeStoragePoolId = volumeInfo.getPoolId();
    long volumeId = volumeInfo.getId();
    if (volumeStoragePoolId == null) {
        // This volume was never assigned to a storage pool, so no SAN volume should exist for it
        return;
    }
    try {
        // If the volume still backs a native snapshot, keep the app instance on Datera
        // but remove it from CloudStack
        if (shouldDeleteVolume(volumeId, null)) {
            DateraUtil.deleteAppInstance(conn, getAppInstanceName(volumeInfo));
        }
        volumeDetailsDao.removeDetails(volumeId);
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        long usedBytes = getUsedBytes(storagePool, volumeId);
        storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
        storagePoolDao.update(storagePoolId, storagePool);
    } catch (UnsupportedEncodingException | DateraObject.DateraError e) {
        String errMesg = "Error deleting app instance for volume: " + volumeInfo.getId();
        s_logger.warn(errMesg, e);
        throw new CloudRuntimeException(errMesg);
    }
}
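getUsedBytes(storagePool, volumeId) is passed the ID of the volume being deleted so that its size is excluded from the recomputed usage. A rough sketch of that accounting, assuming the findByPoolId lookup seen in the host listener below and VolumeVO.getSize(); the helper name and body are illustrative only, not the driver's actual implementation:
// Illustrative sketch: recompute pool usage, ignoring the volume being deleted.
private long getUsedBytesSketch(StoragePoolVO storagePool, long volumeIdToIgnore) {
    long usedBytes = 0;
    List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePool.getId(), null); // assumed lookup
    if (volumes != null) {
        for (VolumeVO volume : volumes) {
            if (volume.getId() == volumeIdToIgnore) {
                continue; // this volume is going away; do not count it
            }
            usedBytes += volume.getSize() != null ? volume.getSize() : 0;
        }
    }
    return usedBytes;
}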
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class DateraHostListener method getTargets.
private List<Map<String, String>> getTargets(long clusterId, long storagePoolId) {
    List<Map<String, String>> targets = new ArrayList<>();
    StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
    // If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
    List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
    if (volumes != null) {
        for (VolumeVO volume : volumes) {
            Long instanceId = volume.getInstanceId();
            if (instanceId != null) {
                VMInstanceVO vmInstance = _vmDao.findById(instanceId);
                Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId();
                if (hostIdForVm != null) {
                    HostVO hostForVm = _hostDao.findById(hostIdForVm);
                    if (hostForVm.getClusterId().equals(clusterId)) {
                        Map<String, String> details = new HashMap<>();
                        details.put(ModifyTargetsCommand.IQN, volume.get_iScsiName());
                        details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress());
                        details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
                        targets.add(details);
                    }
                }
            }
        }
    }
    return targets;
}
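The maps produced by getTargets are typically handed to the hosts of the cluster via a ModifyTargetsCommand. The sketch below shows that shape; the setTargets/setAdd setters, the findByClusterId lookup, and the easySend call are assumptions modeled on other CloudStack host listeners, not code taken from DateraHostListener:
// Illustrative sketch: hand the collected targets to each host in the cluster.
private void notifyHostsSketch(long clusterId, List<Map<String, String>> targets) {
    List<HostVO> hosts = _hostDao.findByClusterId(clusterId); // assumed DAO lookup
    for (HostVO host : hosts) {
        ModifyTargetsCommand cmd = new ModifyTargetsCommand(); // setters below are assumed
        cmd.setTargets(targets);   // the IQN/host/port maps built by getTargets()
        cmd.setAdd(true);          // connect (rather than disconnect) the targets
        _agentMgr.easySend(host.getId(), cmd); // assumed agent-manager send
    }
}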
use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
the class LinstorPrimaryDataStoreDriverImpl method createAsync.
@Override
public void createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCallback<CreateCmdResult> callback) {
    String devPath = null;
    String errMsg = null;
    StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
    try {
        switch (vol.getType()) {
            case VOLUME:
                VolumeInfo volumeInfo = (VolumeInfo) vol;
                VolumeVO volume = _volumeDao.findById(volumeInfo.getId());
                s_logger.debug("createAsync - creating volume");
                devPath = createVolume(volumeInfo, storagePool);
                volume.setFolder("/dev/");
                volume.setPoolId(storagePool.getId());
                volume.setUuid(vol.getUuid());
                volume.setPath(vol.getUuid());
                _volumeDao.update(volume.getId(), volume);
                break;
            case SNAPSHOT:
                s_logger.debug("createAsync - SNAPSHOT");
                createVolumeFromSnapshot((SnapshotInfo) vol, storagePool);
                break;
            case TEMPLATE:
                errMsg = "creating template - not supported";
                s_logger.error("createAsync - " + errMsg);
                break;
            default:
                errMsg = "Invalid DataObjectType (" + vol.getType() + ") passed to createAsync";
                s_logger.error(errMsg);
        }
    } catch (Exception ex) {
        errMsg = ex.getMessage();
        s_logger.error("createAsync: " + errMsg);
        if (callback == null) {
            throw ex;
        }
    }
    if (callback != null) {
        CreateCmdResult result = new CreateCmdResult(devPath, new Answer(null, errMsg == null, errMsg));
        result.setResult(errMsg);
        callback.complete(result);
    }
}
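A caller of createAsync supplies an AsyncCompletionCallback and reads the CreateCmdResult it is completed with (production code usually builds the callback through AsyncCallbackDispatcher). A minimal sketch, assuming the callback interface exposes a single complete(result) method and that CreateCmdResult offers getPath()/getResult(); driver, dataStore, volumeInfo and the logger are placeholders:
// Illustrative caller sketch; not taken from the Linstor driver itself.
AsyncCompletionCallback<CreateCmdResult> callback = new AsyncCompletionCallback<CreateCmdResult>() {
    @Override
    public void complete(CreateCmdResult result) {
        if (result.getResult() == null) {
            // errMsg was null in createAsync, so the Answer was marked successful
            s_logger.info("volume created, device path: " + result.getPath());
        } else {
            s_logger.warn("volume creation failed: " + result.getResult());
        }
    }
};
driver.createAsync(dataStore, volumeInfo, callback);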