Usage of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in the Apache CloudStack project: class ScaleIOVMSnapshotStrategy, method takeVMSnapshot.
/**
 * Takes a VM snapshot of a PowerFlex (ScaleIO) backed VM by requesting a snapshot
 * group of all of the VM's volumes through the ScaleIO gateway client.
 *
 * On success the snapshot group id and per-volume snapshot paths are persisted as
 * VM snapshot details, usage events are published, and the snapshot is returned.
 * On any failure the snapshot state is transitioned to OperationFailed and an
 * alert is raised (stale PowerFlex volumes may need manual cleanup).
 *
 * @param vmSnapshot the snapshot record to realize on the backend; must be a {@link VMSnapshotVO}
 * @return the same {@code vmSnapshot} once finalized
 * @throws CloudRuntimeException if the state transition or the backend snapshot fails
 */
@Override
public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) {
    UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());
    VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
    try {
        vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
    } catch (NoTransitionException e) {
        // Preserve the cause so the original transition failure is not lost from the stack trace.
        throw new CloudRuntimeException(e.getMessage(), e);
    }

    boolean result = false;
    try {
        Map<String, String> srcVolumeDestSnapshotMap = new HashMap<>();
        List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());

        final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId());
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        long prevChainSize = 0;
        long virtualSize = 0;
        for (VolumeObjectTO volume : volumeTOs) {
            // Snapshot name embeds the snapshot id, volume id, a pool-uuid fragment and the
            // custom CloudStack identifier so it is unique and traceable on the PowerFlex side.
            String volumeSnapshotName = String.format("%s-%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(),
                    volume.getId(), storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());
            srcVolumeDestSnapshotMap.put(ScaleIOUtil.getVolumePath(volume.getPath()), volumeSnapshotName);

            virtualSize += volume.getSize();
            VolumeVO volumeVO = volumeDao.findById(volume.getId());
            prevChainSize += volumeVO.getVmSnapshotChainSize() == null ? 0 : volumeVO.getVmSnapshotChainSize();
        }

        // Chain the new snapshot to the VM's current snapshot, if any.
        VMSnapshotTO current = null;
        VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId());
        if (currentSnapshot != null) {
            current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot);
        }
        if (current == null) {
            vmSnapshotVO.setParent(null);
        } else {
            vmSnapshotVO.setParent(current.getId());
        }

        try {
            final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
            SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap);
            if (snapshotGroup == null) {
                throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool");
            }

            // Persist the group id and each volume's snapshot path so they can be
            // resolved later (e.g. for revert/delete of this VM snapshot).
            String snapshotGroupId = snapshotGroup.getSnapshotGroupId();
            List<String> volumeIds = snapshotGroup.getVolumeIds();
            if (volumeIds != null && !volumeIds.isEmpty()) {
                List<VMSnapshotDetailsVO> vmSnapshotDetails = new ArrayList<VMSnapshotDetailsVO>();
                vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "SnapshotGroupId", snapshotGroupId, false));
                // NOTE(review): assumes the gateway returns volume ids in the same order as the
                // entries of srcVolumeDestSnapshotMap / volumeTOs — confirm against the client API.
                for (int index = 0; index < volumeIds.size(); index++) {
                    String volumeSnapshotName = srcVolumeDestSnapshotMap.get(ScaleIOUtil.getVolumePath(volumeTOs.get(index).getPath()));
                    String pathWithScaleIOVolumeName = ScaleIOUtil.updatedPathWithVolumeName(volumeIds.get(index), volumeSnapshotName);
                    vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "Vol_" + volumeTOs.get(index).getId() + "_Snapshot",
                            pathWithScaleIOVolumeName, false));
                }
                vmSnapshotDetailsDao.saveDetails(vmSnapshotDetails);
            }

            finalizeCreate(vmSnapshotVO, volumeTOs);
            result = true;
            LOGGER.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName());

            long newChainSize = 0;
            for (VolumeObjectTO volumeTo : volumeTOs) {
                publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo);
                newChainSize += volumeTo.getSize();
            }
            publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, newChainSize - prevChainSize, virtualSize);
            return vmSnapshot;
        } catch (Exception e) {
            String errMsg = "Unable to take vm snapshot due to: " + e.getMessage();
            LOGGER.warn(errMsg, e);
            // Keep the cause attached for callers/operators debugging the failure.
            throw new CloudRuntimeException(errMsg, e);
        }
    } finally {
        if (!result) {
            try {
                vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
                String subject = "Take snapshot failed for VM: " + userVm.getDisplayName();
                // Report the VM snapshot's own id (getId()), not the VM's id — the message asks the
                // operator to look up stale volumes by snapshot id.
                String message = "Snapshot operation failed for VM: " + userVm.getDisplayName() +
                        ", Please check and delete if any stale volumes created with VM snapshot id: " + vmSnapshot.getId();
                alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT, userVm.getDataCenterId(), userVm.getPodIdToDeployIn(),
                        subject, message);
            } catch (NoTransitionException e1) {
                LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage());
            }
        }
    }
}
Usage of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in the Apache CloudStack project: class ScaleIOPrimaryDataStoreDriver, method takeSnapshot.
/**
 * Takes a snapshot of a single PowerFlex volume through the ScaleIO gateway and
 * completes the given callback with either the snapshot's path (on success) or
 * the error (on failure). This method never throws; failures are reported via
 * the callback's {@link CreateCmdResult}.
 *
 * @param snapshotInfo the snapshot to create; must not be null and must have a base volume
 * @param callback     completed exactly once with the outcome
 */
@Override
public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback<CreateCmdResult> callback) {
    LOGGER.debug("Taking PowerFlex volume snapshot");

    Preconditions.checkArgument(snapshotInfo != null, "snapshotInfo cannot be null");
    VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
    Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null");

    VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
    long storagePoolId = volumeVO.getPoolId();
    Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0");

    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null");

    CreateCmdResult result;
    try {
        SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO) snapshotInfo.getTO();

        final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
        final String scaleIOVolumeId = ScaleIOUtil.getVolumePath(volumeVO.getPath());
        // Name embeds the snapshot id, a pool-uuid fragment and the custom CloudStack
        // identifier so the backend object is unique and traceable.
        String snapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshotInfo.getId(),
                storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());

        org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = client.takeSnapshot(scaleIOVolumeId, snapshotName);
        if (scaleIOVolume == null) {
            throw new CloudRuntimeException("Failed to take snapshot on PowerFlex cluster");
        }

        snapshotObjectTo.setPath(ScaleIOUtil.updatedPathWithVolumeName(scaleIOVolume.getId(), snapshotName));
        CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo);
        result = new CreateCmdResult(null, createObjectAnswer);
        result.setResult(null);
    } catch (Exception e) {
        String errMsg = "Unable to take PowerFlex volume snapshot for volume: " + volumeInfo.getId() + " due to " + e.getMessage();
        // Log the throwable as well — this path does not rethrow, so the stack
        // trace would otherwise be lost entirely.
        LOGGER.warn(errMsg, e);
        result = new CreateCmdResult(null, new CreateObjectAnswer(e.toString()));
        result.setResult(e.toString());
    }

    callback.complete(result);
}
Usage of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in the Apache CloudStack project: class ScaleIOPrimaryDataStoreDriver, method resizeVolume.
/**
 * Resizes a PowerFlex volume through the ScaleIO gateway. PowerFlex only allows
 * growing a volume, and only in multiples of 8 GB, so the requested size is
 * rounded UP to the next 8 GB boundary. On success the CloudStack volume record
 * and the pool's used-bytes accounting are updated with the actual new size.
 *
 * @param volumeInfo the volume to resize; its payload must be a {@link ResizeVolumePayload}
 * @throws CloudRuntimeException if the new size is not strictly larger, or the backend resize fails
 */
private void resizeVolume(VolumeInfo volumeInfo) {
    LOGGER.debug("Resizing PowerFlex volume");

    Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null");

    try {
        String scaleIOVolumeId = ScaleIOUtil.getVolumePath(volumeInfo.getPath());
        Long storagePoolId = volumeInfo.getPoolId();

        ResizeVolumePayload payload = (ResizeVolumePayload) volumeInfo.getpayload();
        long newSizeInBytes = payload.newSize != null ? payload.newSize : volumeInfo.getSize();
        // Only increase size is allowed and size should be specified in granularity of 8 GB
        if (newSizeInBytes <= volumeInfo.getSize()) {
            throw new CloudRuntimeException("Only increase size is allowed for volume: " + volumeInfo.getName());
        }

        // Convert to GB rounding UP. Plain integer division would truncate (e.g. a
        // 8.5 GiB request would become 8 GiB — smaller than what was asked for).
        long newSizeInGB = (long) Math.ceil(newSizeInBytes / (1024.0 * 1024.0 * 1024.0));
        // Round up to the 8 GB granularity PowerFlex requires.
        long newSizeIn8gbBoundary = ((newSizeInGB + 7) / 8) * 8;

        final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
        org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume =
                client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8gbBoundary);
        if (scaleIOVolume == null) {
            throw new CloudRuntimeException("Failed to resize volume: " + volumeInfo.getName());
        }

        // Record the size actually provisioned by the backend (may exceed the request
        // due to the 8 GB rounding).
        VolumeVO volume = volumeDao.findById(volumeInfo.getId());
        long oldVolumeSize = volume.getSize();
        volume.setSize(scaleIOVolume.getSizeInKb() * 1024);
        volumeDao.update(volume.getId(), volume);

        // Grow the pool's used-bytes by the delta, clamped at capacity.
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        long capacityBytes = storagePool.getCapacityBytes();
        long usedBytes = storagePool.getUsedBytes();
        usedBytes += volume.getSize() - oldVolumeSize;
        storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
        storagePoolDao.update(storagePoolId, storagePool);
    } catch (Exception e) {
        String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg, e);
    }
}
Usage of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in the Apache CloudStack project: class ScaleIOPrimaryDataStoreDriver, method getVolumeStats.
/**
 * Fetches usage statistics for a PowerFlex volume from the ScaleIO gateway.
 *
 * @param storagePool the pool the volume resides on; must not be null
 * @param volumePath  the volume's path; must be non-empty
 * @return a pair of (net provisioned bytes, allocated bytes), or null when the
 *         gateway reports no statistics for the volume
 * @throws CloudRuntimeException if querying the gateway fails
 */
@Override
public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumePath) {
    Preconditions.checkArgument(storagePool != null, "storagePool cannot be null");
    Preconditions.checkArgument(StringUtils.isNotEmpty(volumePath), "volumePath cannot be null");

    try {
        final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId());
        final VolumeStatistics stats = client.getVolumeStatistics(ScaleIOUtil.getVolumePath(volumePath));
        // No stats from the gateway means there is nothing to report.
        return stats == null
                ? null
                : new Pair<Long, Long>(stats.getNetProvisionedAddressesInBytes(), stats.getAllocatedSizeInBytes());
    } catch (Exception e) {
        String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool.getId() + " due to " + e.getMessage();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg, e);
    }
}
Usage of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in the Apache CloudStack project: class ScaleIOPrimaryDataStoreDriver, method createTemplateVolume.
/**
 * Creates a thin-provisioned PowerFlex volume to hold a template on the given
 * storage pool, records its install path and size on the template-pool ref, and
 * updates the pool's used-bytes accounting.
 *
 * @param templateInfo  the template to create a backing volume for; must not be null and must have a size
 * @param storagePoolId id of the target PowerFlex-backed pool; must be &gt; 0
 * @return the template's install path (PowerFlex volume id + name)
 * @throws CloudRuntimeException if the backend volume creation fails
 */
private String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) {
    LOGGER.debug("Creating PowerFlex template volume");

    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    Preconditions.checkArgument(templateInfo != null, "templateInfo cannot be null");
    Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0");
    Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null");
    // getSize() returns a boxed Long; fail with a clear message instead of a bare NPE on unboxing.
    final Long sizeInBytes = templateInfo.getSize();
    Preconditions.checkArgument(sizeInBytes != null, "template size cannot be null");

    try {
        final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);

        final String scaleIOStoragePoolId = storagePool.getPath();
        // Round up to whole GB — PowerFlex volume sizes are specified in GB.
        final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0));
        final String scaleIOVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.TEMPLATE_PREFIX, templateInfo.getId(),
                storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());

        org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume =
                client.createVolume(scaleIOVolumeName, scaleIOStoragePoolId, (int) sizeInGb, Storage.ProvisioningType.THIN);
        if (scaleIOVolume == null) {
            throw new CloudRuntimeException("Failed to create template volume on PowerFlex cluster");
        }

        // Persist path/size on the template-pool reference.
        // NOTE(review): assumes a templatePoolRef row already exists for this pool/template — confirm with callers.
        VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(storagePoolId, templateInfo.getId(), null);
        String templatePath = ScaleIOUtil.updatedPathWithVolumeName(scaleIOVolume.getId(), scaleIOVolumeName);
        templatePoolRef.setInstallPath(templatePath);
        templatePoolRef.setLocalDownloadPath(scaleIOVolume.getId());
        templatePoolRef.setTemplateSize(scaleIOVolume.getSizeInKb() * 1024);
        vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);

        // Grow the pool's used-bytes by the template size, clamped at capacity.
        long capacityBytes = storagePool.getCapacityBytes();
        long usedBytes = storagePool.getUsedBytes();
        usedBytes += templatePoolRef.getTemplateSize();
        storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
        storagePoolDao.update(storagePoolId, storagePool);

        return templatePath;
    } catch (Exception e) {
        String errMsg = "Unable to create PowerFlex template volume due to " + e.getMessage();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg, e);
    }
}
Aggregations