Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method takeSnapshot.
@Override
public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback<CreateCmdResult> callback) {
    CreateCmdResult result = null;
    try {
        VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
        VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
        long sfVolumeId = Long.parseLong(volumeVO.getFolder());
        long storagePoolId = volumeVO.getPoolId();
        SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
        SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        long capacityBytes = storagePool.getCapacityBytes();
        // getUsedBytes(StoragePool) will not include the bytes of the proposed new volume or snapshot because
        // updateSnapshotDetails has not yet been called for this new volume or snapshot.
        long usedBytes = getUsedBytes(storagePool);
        long sfVolumeSize = sfVolume.getTotalSize();
        usedBytes += sfVolumeSize;
        // When "charging" these bytes against the pool's capacity, take the full size of the SolidFire volume
        // that is serving as the volume the snapshot is of (either a new SolidFire volume or a SolidFire snapshot).
        if (usedBytes > capacityBytes) {
            throw new CloudRuntimeException("Insufficient amount of space remains in this primary storage to take a snapshot");
        }
        storagePool.setUsedBytes(usedBytes);
        SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO) snapshotInfo.getTO();
        if (shouldTakeSnapshot(snapshotInfo.getId())) {
            // We are supposed to take a SolidFire snapshot to serve as the back-end for our CloudStack volume snapshot.
            String sfNewSnapshotName = volumeInfo.getName() + "-" + snapshotInfo.getUuid();
            long sfNewSnapshotId = SolidFireUtil.createSnapshot(sfConnection, sfVolumeId, sfNewSnapshotName, getSnapshotAttributes(snapshotInfo));
            updateSnapshotDetails(snapshotInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize);
            snapshotObjectTo.setPath("SfSnapshotId=" + sfNewSnapshotId);
        } else {
            // We are supposed to create a new SolidFire volume to serve as the back-end for our CloudStack volume snapshot.
            String sfNewVolumeName = volumeInfo.getName() + "-" + snapshotInfo.getUuid();
            final Iops iops = getIops(MIN_IOPS_FOR_SNAPSHOT_VOLUME, MAX_IOPS_FOR_SNAPSHOT_VOLUME, storagePoolId);
            long sfNewVolumeId = SolidFireUtil.createVolume(sfConnection, sfNewVolumeName, sfVolume.getAccountId(), sfVolumeSize, sfVolume.isEnable512e(),
                    getSnapshotAttributes(snapshotInfo), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
            SolidFireUtil.SolidFireVolume sfNewVolume = SolidFireUtil.getVolume(sfConnection, sfNewVolumeId);
            updateSnapshotDetails(snapshotInfo.getId(), sfNewVolumeId, storagePoolId, sfVolumeSize, sfNewVolume.getIqn());
            snapshotObjectTo.setPath("SfVolumeId=" + sfNewVolumeId);
        }
        // Now that we have successfully created a volume or a snapshot, update the space usage in the cloud.storage_pool table
        // (even though cloud.storage_pool.used_bytes is likely no longer in use).
        storagePoolDao.update(storagePoolId, storagePool);
        CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo);
        result = new CreateCmdResult(null, createObjectAnswer);
        result.setResult(null);
    } catch (Exception ex) {
        LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex);
        result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString()));
        result.setResult(ex.toString());
    }
    callback.complete(result);
}
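The method records which kind of SolidFire object backs the CloudStack snapshot by writing either "SfSnapshotId=<id>" or "SfVolumeId=<id>" into the snapshot's path. The following is a minimal, standalone sketch of how such a path value could be decoded; the helper class and method names are hypothetical and are not part of SolidFirePrimaryDataStoreDriver.

// Hypothetical standalone helper; illustrates decoding of the path values set by takeSnapshot above.
public final class SfSnapshotPathSketch {
    public static boolean isBackedBySnapshot(String path) {
        // Paths of the form "SfSnapshotId=<id>" indicate a SolidFire snapshot backs the CloudStack snapshot.
        return path != null && path.startsWith("SfSnapshotId=");
    }

    public static long extractBackendId(String path) {
        // Both formats are "<prefix>=<numeric id>", so split on '=' and parse the value.
        String[] parts = path.split("=", 2);
        if (parts.length != 2) {
            throw new IllegalArgumentException("Unexpected snapshot path format: " + path);
        }
        return Long.parseLong(parts[1]);
    }

    public static void main(String[] args) {
        String path = "SfSnapshotId=12345"; // as set via snapshotObjectTo.setPath(...)
        System.out.println(isBackedBySnapshot(path)); // true
        System.out.println(extractBackendId(path));   // 12345
    }
}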
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class NexentaPrimaryDataStoreDriver, method createAsync.
@Override
public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
    String iqn = null;
    String errorMessage = null;
    if (dataObject.getType() != DataObjectType.VOLUME) {
        errorMessage = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
    } else {
        VolumeInfo volumeInfo = (VolumeInfo) dataObject;
        long storagePoolId = dataStore.getId();
        NexentaStorAppliance appliance = getNexentaStorAppliance(storagePoolId);
        // TODO: maybe we should use md5(volume name) as volume name
        NexentaStorZvol zvol = (NexentaStorZvol) appliance.createVolume(volumeInfo.getName(), volumeInfo.getSize());
        iqn = zvol.getIqn();
        VolumeVO volume = this._volumeDao.findById(volumeInfo.getId());
        volume.set_iScsiName(iqn);
        volume.setFolder(zvol.getName());
        volume.setPoolType(Storage.StoragePoolType.IscsiLUN);
        volume.setPoolId(storagePoolId);
        _volumeDao.update(volume.getId(), volume);
        StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
        long capacityBytes = storagePool.getCapacityBytes();
        long usedBytes = storagePool.getUsedBytes();
        usedBytes += volumeInfo.getSize();
        storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
        _storagePoolDao.update(storagePoolId, storagePool);
    }
    CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errorMessage == null, errorMessage));
    result.setResult(errorMessage);
    callback.complete(result);
}
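The pool bookkeeping above grows used bytes by the new volume's size but clamps the result at the pool's capacity (the ternary on setUsedBytes). Below is a standalone sketch of that accounting rule; the class and method names are illustrative, not CloudStack API.

// Illustrative standalone sketch of the used-bytes accounting performed in createAsync above.
public final class PoolAccountingSketch {
    static long addUsedBytes(long usedBytes, long capacityBytes, long newVolumeSize) {
        long updated = usedBytes + newVolumeSize;
        // Same effect as "updated > capacityBytes ? capacityBytes : updated": never report more than capacity.
        return Math.min(updated, capacityBytes);
    }

    public static void main(String[] args) {
        System.out.println(addUsedBytes(900L, 1000L, 300L)); // 1000 (clamped to capacity)
        System.out.println(addUsedBytes(100L, 1000L, 300L)); // 400
    }
}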
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method getDataObjectSizeIncludingHypervisorSnapshotReserve.
@Override
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
    long volumeSize = 0;
    if (dataObject.getType() == DataObjectType.VOLUME) {
        VolumeInfo volume = (VolumeInfo) dataObject;
        volumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(volume.getSize(), volume.getHypervisorSnapshotReserve());
    } else if (dataObject.getType() == DataObjectType.TEMPLATE) {
        TemplateInfo templateInfo = (TemplateInfo) dataObject;
        // TemplateInfo sometimes has a null size.
        long templateSize = templateInfo.getSize() != null ? templateInfo.getSize() : 0;
        volumeSize = (long) (templateSize + templateSize * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f));
    }
    return volumeSize;
}
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method resize.
@Override
public void resize(DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
    String iqn = null;
    String errMsg = null;
    if (dataObject.getType() == DataObjectType.VOLUME) {
        VolumeInfo volumeInfo = (VolumeInfo) dataObject;
        iqn = volumeInfo.get_iScsiName();
        long storagePoolId = volumeInfo.getPoolId();
        long sfVolumeId = Long.parseLong(volumeInfo.getFolder());
        ResizeVolumePayload payload = (ResizeVolumePayload) volumeInfo.getpayload();
        SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
        SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
        verifySufficientIopsForStoragePool(storagePoolId, volumeInfo.getId(), payload.newMinIops);
        verifySufficientBytesForStoragePool(storagePoolId, volumeInfo.getId(), payload.newSize, payload.newHypervisorSnapshotReserve);
        long sfNewVolumeSize = sfVolume.getTotalSize();
        Integer hsr = volumeInfo.getHypervisorSnapshotReserve();
        if (payload.newSize != null || payload.newHypervisorSnapshotReserve != null) {
            if (payload.newHypervisorSnapshotReserve != null) {
                if (hsr != null) {
                    if (payload.newHypervisorSnapshotReserve > hsr) {
                        hsr = payload.newHypervisorSnapshotReserve;
                    }
                } else {
                    hsr = payload.newHypervisorSnapshotReserve;
                }
            }
            sfNewVolumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(payload.newSize, hsr);
        }
        Map<String, String> mapAttributes = new HashMap<>();
        mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId()));
        mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize));
        SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes, payload.newMinIops, payload.newMaxIops,
                getDefaultBurstIops(storagePoolId, payload.newMaxIops));
        VolumeVO volume = volumeDao.findById(volumeInfo.getId());
        volume.setMinIops(payload.newMinIops);
        volume.setMaxIops(payload.newMaxIops);
        volume.setHypervisorSnapshotReserve(hsr);
        volumeDao.update(volume.getId(), volume);
        // SolidFireUtil.VOLUME_SIZE was introduced in 4.5.
        updateVolumeDetails(volume.getId(), sfNewVolumeSize);
    } else {
        errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize";
    }
    CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));
    result.setResult(errMsg);
    callback.complete(result);
}
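The nested null checks on the hypervisor snapshot reserve amount to: keep the existing reserve if nothing is requested, adopt the requested one if no reserve exists yet, and otherwise take the larger of the two. A standalone sketch of that merge rule follows; the helper name is hypothetical and not part of the driver.

// Hypothetical standalone helper mirroring the HSR merge logic in resize above.
public final class HsrMergeSketch {
    static Integer mergeHsr(Integer currentHsr, Integer requestedHsr) {
        if (requestedHsr == null) {
            return currentHsr; // nothing requested: keep the existing reserve
        }
        if (currentHsr == null) {
            return requestedHsr; // no existing reserve: adopt the requested one
        }
        return Math.max(currentHsr, requestedHsr); // never shrink the reserve on resize
    }

    public static void main(String[] args) {
        System.out.println(mergeHsr(25, 10));   // 25
        System.out.println(mergeHsr(null, 10)); // 10
        System.out.println(mergeHsr(25, null)); // 25
    }
}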
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method getSolidFireVolumeId.
private long getSolidFireVolumeId(DataObject dataObject, boolean grantAccess) {
    if (dataObject.getType() == DataObjectType.VOLUME) {
        final VolumeInfo volumeInfo = (VolumeInfo) dataObject;
        final long volumeId = volumeInfo.getId();
        if (grantAccess && isBasicGrantAccess(volumeId)) {
            volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_GRANT_ACCESS);
            final Long sfVolumeId = getBasicSfVolumeId(volumeId);
            Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic grant access).");
            return sfVolumeId;
        } else if (!grantAccess && isBasicRevokeAccess(volumeId)) {
            volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_REVOKE_ACCESS);
            final Long sfVolumeId = getBasicSfVolumeId(volumeId);
            Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic revoke access).");
            return sfVolumeId;
        }
        return Long.parseLong(volumeInfo.getFolder());
    }
    if (dataObject.getType() == DataObjectType.SNAPSHOT) {
        SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(dataObject.getId(), SolidFireUtil.VOLUME_ID);
        if (snapshotDetails == null || snapshotDetails.getValue() == null) {
            throw new CloudRuntimeException("Unable to locate the volume ID associated with the following snapshot ID: " + dataObject.getId());
        }
        return Long.parseLong(snapshotDetails.getValue());
    }
    if (dataObject.getType() == DataObjectType.TEMPLATE) {
        return getVolumeIdFrom_iScsiPath(((TemplateInfo) dataObject).getInstallPath());
    }
    throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject, boolean)");
}
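The method resolves the backend SolidFire volume ID differently per DataObjectType: from the volume's folder column, from a snapshot-details row, or from the template's iSCSI install path. The standalone sketch below imitates that type-based dispatch with a hypothetical enum and plain parameters instead of the CloudStack DataObject hierarchy; it is illustrative only.

// Hypothetical standalone sketch of the per-type dispatch in getSolidFireVolumeId above.
public final class BackendIdDispatchSketch {
    enum ObjectType { VOLUME, SNAPSHOT, TEMPLATE }

    static long resolveBackendVolumeId(ObjectType type, String volumeFolder, Long snapshotDetailValue, long templatePathVolumeId) {
        switch (type) {
            case VOLUME:
                // For volumes, the SolidFire volume ID is stored as text in the volume's "folder" column.
                return Long.parseLong(volumeFolder);
            case SNAPSHOT:
                // For snapshots, it comes from a snapshot-details row; a missing value is an error.
                if (snapshotDetailValue == null) {
                    throw new IllegalStateException("No SolidFire volume ID recorded for this snapshot");
                }
                return snapshotDetailValue;
            case TEMPLATE:
                // For templates, it is derived from the template's iSCSI install path.
                return templatePathVolumeId;
            default:
                throw new IllegalArgumentException("Unsupported type: " + type);
        }
    }

    public static void main(String[] args) {
        System.out.println(resolveBackendVolumeId(ObjectType.VOLUME, "42", null, 0L)); // 42
    }
}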