Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class VolumeApiServiceImpl, method createVolumeFromSnapshot.
protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId, Long vmId) throws StorageUnavailableException {
    VolumeInfo createdVolume = null;
    SnapshotVO snapshot = _snapshotDao.findById(snapshotId);
    snapshot.getVolumeId();
    UserVmVO vm = null;
    if (vmId != null) {
        vm = _userVmDao.findById(vmId);
    }
    // sync old snapshots to region store if necessary
    createdVolume = _volumeMgr.createVolumeFromSnapshot(volume, snapshot, vm);
    VolumeVO volumeVo = _volsDao.findById(createdVolume.getId());
    UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(), createdVolume.getDiskOfferingId(), null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid(), volumeVo.isDisplayVolume());
    return volumeVo;
}
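For context, a caller inside the same service would typically resolve the source volume first and translate the checked StorageUnavailableException for the API layer. The following is a hypothetical sketch, not code from the CloudStack source; it reuses only fields and exception types that appear elsewhere on this page (_volsDao, InvalidParameterValueException, CloudRuntimeException):

// Hypothetical caller sketch (assumed helper, not in VolumeApiServiceImpl):
// looks up the source volume, creates the new volume from the snapshot, and
// rethrows storage unavailability as a runtime error for the API layer.
private VolumeVO createFromSnapshotOrFail(long volumeId, long snapshotId, Long vmId) {
    VolumeVO sourceVolume = _volsDao.findById(volumeId);
    if (sourceVolume == null) {
        throw new InvalidParameterValueException("Unable to find volume " + volumeId);
    }
    try {
        return createVolumeFromSnapshot(sourceVolume, snapshotId, vmId);
    } catch (StorageUnavailableException e) {
        throw new CloudRuntimeException("Primary storage is unavailable for snapshot " + snapshotId, e);
    }
}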
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class VolumeApiServiceImpl, method takeSnapshot.
@Override
@ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "taking snapshot", async = true)
public Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType) throws ResourceAllocationException {
    VolumeInfo volume = volFactory.getVolume(volumeId);
    if (volume == null) {
        throw new InvalidParameterValueException("Creating snapshot failed due to volume:" + volumeId + " doesn't exist");
    }
    if (volume.getState() != Volume.State.Ready) {
        throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
    }
    StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId());
    if (storagePoolVO.isManaged() && locationType == null) {
        locationType = Snapshot.LocationType.PRIMARY;
    }
    VMInstanceVO vm = null;
    if (volume.getInstanceId() != null) {
        vm = _vmInstanceDao.findById(volume.getInstanceId());
    }
    if (vm != null) {
        // serialize VM operation
        AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
            // avoid re-entrance
            VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId());
            try {
                return orchestrateTakeVolumeSnapshot(volumeId, policyId, snapshotId, account, quiescevm, locationType);
            } finally {
                _workJobDao.expunge(placeHolder.getId());
            }
        } else {
            Outcome<Snapshot> outcome = takeVolumeSnapshotThroughJobQueue(vm.getId(), volumeId, policyId, snapshotId, account.getId(), quiescevm, locationType);
            try {
                outcome.get();
            } catch (InterruptedException e) {
                throw new RuntimeException("Operation is interrupted", e);
            } catch (java.util.concurrent.ExecutionException e) {
                throw new RuntimeException("Execution exception", e);
            }
            Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
            if (jobResult != null) {
                if (jobResult instanceof ConcurrentOperationException) {
                    throw (ConcurrentOperationException) jobResult;
                } else if (jobResult instanceof ResourceAllocationException) {
                    throw (ResourceAllocationException) jobResult;
                } else if (jobResult instanceof Throwable) {
                    throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
                }
            }
            return _snapshotDao.findById(snapshotId);
        }
    } else {
        CreateSnapshotPayload payload = new CreateSnapshotPayload();
        payload.setSnapshotId(snapshotId);
        payload.setSnapshotPolicyId(policyId);
        payload.setAccount(account);
        payload.setQuiescevm(quiescevm);
        volume.addPayload(payload);
        return volService.takeSnapshot(volume);
    }
}
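The branching above follows a recurring CloudStack pattern: when the call already runs inside a VM work job, the snapshot is taken inline under a placeholder job; otherwise a new work job is enqueued and the caller waits for its outcome. The simplified, self-contained illustration below uses hypothetical stand-in types (WorkQueue, SnapshotDispatcher), not the CloudStack job framework, purely to show the shape of that decision:

// Hypothetical stand-ins; not CloudStack classes.
interface WorkQueue {
    boolean alreadyInsideWorkJob();
    <T> T submitAndWait(java.util.concurrent.Callable<T> task) throws Exception;
}

final class SnapshotDispatcher {
    private final WorkQueue queue;

    SnapshotDispatcher(WorkQueue queue) {
        this.queue = queue;
    }

    String takeSnapshot(long volumeId) throws Exception {
        if (queue.alreadyInsideWorkJob()) {
            // Re-entrant call: do the work directly instead of queuing a job from a job.
            return orchestrate(volumeId);
        }
        // First entry: serialize against other operations on the same VM via the queue.
        return queue.submitAndWait(() -> orchestrate(volumeId));
    }

    private String orchestrate(long volumeId) {
        return "snapshot-of-" + volumeId;
    }
}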
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class VMSnapshotManagerImpl, method getVolumeTOList.
private List<VolumeObjectTO> getVolumeTOList(Long vmId) {
    List<VolumeObjectTO> volumeTOs = new ArrayList<VolumeObjectTO>();
    List<VolumeVO> volumeVos = _volumeDao.findByInstance(vmId);
    VolumeInfo volumeInfo = null;
    for (VolumeVO volume : volumeVos) {
        volumeInfo = volumeDataFactory.getVolume(volume.getId());
        volumeTOs.add((VolumeObjectTO) volumeInfo.getTO());
    }
    return volumeTOs;
}
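As a usage sketch (an assumed helper, not part of VMSnapshotManagerImpl), the resulting list can be consumed directly, for example to total the capacity covered by a VM snapshot, assuming VolumeObjectTO exposes a getSize() accessor:

// Hypothetical helper: sums the sizes of all volumes attached to the given VM.
private long getTotalVolumeSize(Long vmId) {
    long total = 0;
    for (VolumeObjectTO volumeTO : getVolumeTOList(vmId)) {
        Long size = volumeTO.getSize();
        if (size != null) {
            total += size;
        }
    }
    return total;
}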
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class NexentaPrimaryDataStoreDriver, method deleteAsync.
@Override
public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
    String errorMessage = null;
    if (data.getType() == DataObjectType.VOLUME) {
        VolumeInfo volumeInfo = (VolumeInfo) data;
        long storagePoolId = store.getId();
        NexentaStorAppliance appliance = getNexentaStorAppliance(storagePoolId);
        StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
        // _storagePoolDao.update(storagePoolId);
    } else {
        errorMessage = String.format("Invalid DataObjectType(%s) passed to deleteAsync", data.getType());
    }
    CommandResult result = new CommandResult();
    result.setResult(errorMessage);
    callback.complete(result);
}
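Note that the callback is completed on every path, carrying either null or the error message in the CommandResult. A defensive variant of the same pattern, sketched below with only the identifiers already shown above (this is not the actual Nexenta driver code), would also report an unexpected failure through the same CommandResult rather than letting it escape the async method:

// Sketch of the completion pattern with an exception guard; not the driver's actual code.
public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
    String errorMessage = null;
    try {
        if (data.getType() == DataObjectType.VOLUME) {
            // delete the backing volume on the appliance here
        } else {
            errorMessage = String.format("Invalid DataObjectType(%s) passed to deleteAsync", data.getType());
        }
    } catch (Exception e) {
        errorMessage = "deleteAsync failed: " + e.getMessage();
    }
    CommandResult result = new CommandResult();
    result.setResult(errorMessage);
    callback.complete(result);
}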
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method verifySufficientBytesForStoragePool.
private void verifySufficientBytesForStoragePool(long storagePoolId, long volumeId, long newSize, Integer newHypervisorSnapshotReserve) {
    DataStore primaryDataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
    VolumeInfo volumeInfo = volumeFactory.getVolume(volumeId, primaryDataStore);
    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    long currentSizeWithHsr = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePool);
    newHypervisorSnapshotReserve = newHypervisorSnapshotReserve == null ? LOWEST_HYPERVISOR_SNAPSHOT_RESERVE : Math.max(newHypervisorSnapshotReserve, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE);
    long newSizeWithHsr = (long) (newSize + newSize * (newHypervisorSnapshotReserve / 100f));
    if (newSizeWithHsr < currentSizeWithHsr) {
        throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not support shrinking a volume.");
    }
    long availableBytes = storagePool.getCapacityBytes() - getUsedBytes(storagePool);
    if ((newSizeWithHsr - currentSizeWithHsr) > availableBytes) {
        throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not have enough space to expand the volume.");
    }
}
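The arithmetic is the key step: the requested size is grown by the hypervisor snapshot reserve percentage (never less than LOWEST_HYPERVISOR_SNAPSHOT_RESERVE) before being compared against the pool's remaining capacity. Below is a standalone sketch of that calculation, mirroring the formula above with an assumed reserve floor of 10 percent (the real constant's value may differ):

// Standalone illustration of the size-with-reserve formula; not the driver itself.
public class HsrSizeExample {
    static long sizeWithReserve(long newSize, Integer reservePercent) {
        int lowestReserve = 10; // assumed stand-in for LOWEST_HYPERVISOR_SNAPSHOT_RESERVE
        int reserve = reservePercent == null ? lowestReserve : Math.max(reservePercent, lowestReserve);
        return (long) (newSize + newSize * (reserve / 100f));
    }

    public static void main(String[] args) {
        long requested = 100L * 1024 * 1024 * 1024; // 100 GiB resize request
        // With a 10% reserve the pool must accommodate roughly 110 GiB.
        System.out.println(sizeWithReserve(requested, 10));
    }
}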