Use of com.cloud.framework.jobs.AsyncJobExecutionContext in project cosmic by MissionCriticalCloud.
The class VolumeApiServiceImpl, method takeSnapshot:
@Override
@ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "taking snapshot", async = true)
public Snapshot takeSnapshot(final Long volumeId, final Long policyId, final Long snapshotId, final Account account, final boolean quiescevm) throws ResourceAllocationException {
    final VolumeInfo volume = volFactory.getVolume(volumeId);
    if (volume == null) {
        throw new InvalidParameterValueException("Creating snapshot failed because volume " + volumeId + " doesn't exist");
    }
    if (volume.getState() != Volume.State.Ready) {
        throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
    }
    VMInstanceVO vm = null;
    if (volume.getInstanceId() != null) {
        vm = _vmInstanceDao.findById(volume.getInstanceId());
    }
    if (vm != null) {
        // serialize VM operation
        final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
            // avoid re-entrance
            final VmWorkJobVO placeHolder;
            placeHolder = createPlaceHolderWork(vm.getId());
            try {
                return orchestrateTakeVolumeSnapshot(volumeId, policyId, snapshotId, account, quiescevm);
            } finally {
                _workJobDao.expunge(placeHolder.getId());
            }
        } else {
            final Outcome<Snapshot> outcome = takeVolumeSnapshotThroughJobQueue(vm.getId(), volumeId, policyId, snapshotId, account.getId(), quiescevm);
            try {
                outcome.get();
            } catch (final InterruptedException e) {
                throw new RuntimeException("Operation is interrupted", e);
            } catch (final java.util.concurrent.ExecutionException e) {
                throw new RuntimeException("Execution exception", e);
            }
            final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
            if (jobResult != null) {
                if (jobResult instanceof ConcurrentOperationException) {
                    throw (ConcurrentOperationException) jobResult;
                } else if (jobResult instanceof ResourceAllocationException) {
                    throw (ResourceAllocationException) jobResult;
                } else if (jobResult instanceof Throwable) {
                    throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
                }
            }
            return _snapshotDao.findById(snapshotId);
        }
    } else {
        final CreateSnapshotPayload payload = new CreateSnapshotPayload();
        payload.setSnapshotId(snapshotId);
        payload.setAccount(account);
        payload.setQuiescevm(quiescevm);
        volume.addPayload(payload);
        return volService.takeSnapshot(volume);
    }
}
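The usages on this page share the same control flow: the method asks AsyncJobExecutionContext.getCurrentExecutionContext() whether it is already running inside a VM work job. If it is, it creates a placeholder VmWorkJobVO and calls the orchestrate* method directly, expunging the placeholder in a finally block; if not, it submits the operation through the per-VM job queue and blocks on an Outcome. The sketch below only distills that branching with invented stand-in types (ExecutionContext, PlaceholderWork, JobQueue, runSerialized); none of these names are cosmic API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Supplier;

// Hypothetical distillation of the "serialize VM operation" pattern above.
// The interfaces below are illustrative stand-ins, not cosmic classes.
public class VmWorkGateSketch {

    /** Stand-in for AsyncJobExecutionContext.isJobDispatchedBy(VM_WORK_JOB_DISPATCHER). */
    interface ExecutionContext {
        boolean dispatchedByVmWorkJob();
    }

    /** Stand-in for the placeholder VmWorkJobVO that is expunged in the finally block. */
    interface PlaceholderWork extends AutoCloseable {
        @Override
        void close(); // expunge the placeholder record
    }

    /** Stand-in for takeVolumeSnapshotThroughJobQueue / attachVolumeToVmThroughJobQueue / ... */
    interface JobQueue {
        <T> CompletableFuture<T> submit(long vmId, Supplier<T> operation);
    }

    /**
     * Runs the operation directly when we are already inside a VM work job
     * (guarding against re-entrance with a placeholder record); otherwise pushes
     * it through the per-VM job queue and blocks on the outcome.
     */
    static <T> T runSerialized(final ExecutionContext ctx, final JobQueue queue, final long vmId,
                               final Supplier<PlaceholderWork> placeholderFactory,
                               final Supplier<T> operation) {
        if (ctx.dispatchedByVmWorkJob()) {
            try (PlaceholderWork ignored = placeholderFactory.get()) {
                return operation.get();
            }
        }
        try {
            return queue.submit(vmId, operation).get();
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Operation is interrupted", e);
        } catch (final ExecutionException e) {
            throw new RuntimeException("Execution exception", e);
        }
    }

    public static void main(final String[] args) {
        // Toy wiring: we pretend we are not inside a VM work job, so the queue path is taken.
        final ExecutionContext ctx = () -> false;
        final JobQueue queue = new JobQueue() {
            @Override
            public <T> CompletableFuture<T> submit(final long vmId, final Supplier<T> op) {
                return CompletableFuture.supplyAsync(op);
            }
        };
        final String result = runSerialized(ctx, queue, 42L, () -> () -> { }, () -> "snapshot-taken");
        System.out.println(result); // prints "snapshot-taken"
    }
}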
Use of com.cloud.framework.jobs.AsyncJobExecutionContext in project cosmic by MissionCriticalCloud.
The class VolumeApiServiceImpl, method attachVolumeToVM:
public Volume attachVolumeToVM(final Long vmId, final Long volumeId, final Long deviceId) {
    final Account caller = CallContext.current().getCallingAccount();
    // Check that the volume ID is valid
    final VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
    // Check that the volume is a data volume
    if (volumeToAttach == null || !(volumeToAttach.getVolumeType() == Volume.Type.DATADISK || volumeToAttach.getVolumeType() == Volume.Type.ROOT)) {
        throw new InvalidParameterValueException("Please specify a volume with a valid type: " + Volume.Type.ROOT.toString() + " or " + Volume.Type.DATADISK.toString());
    }
    // Check that the volume is not currently attached to any VM
    if (volumeToAttach.getInstanceId() != null) {
        throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM.");
    }
    // Check that the volume is not destroyed
    if (volumeToAttach.getState() == Volume.State.Destroy) {
        throw new InvalidParameterValueException("Please specify a volume that is not destroyed.");
    }
    // Check that the virtual machine ID is valid and it's a user vm
    final UserVmVO vm = _userVmDao.findById(vmId);
    if (vm == null || vm.getType() != VirtualMachine.Type.User) {
        throw new InvalidParameterValueException("Please specify a valid User VM.");
    }
    // Check that the VM is in the correct state
    if (vm.getState() != State.Running && vm.getState() != State.Stopped) {
        throw new InvalidParameterValueException("Please specify a VM that is either running or stopped.");
    }
    // Check that the VM and the volume are in the same zone
    if (vm.getDataCenterId() != volumeToAttach.getDataCenterId()) {
        throw new InvalidParameterValueException("Please specify a VM that is in the same zone as the volume.");
    }
    // Check that the device ID is valid
    if (deviceId != null) {
        // validate ROOT volume type
        if (deviceId.longValue() == 0) {
            validateRootVolumeDetachAttach(_volsDao.findById(volumeToAttach.getId()), vm);
            // the VM shouldn't have any volume with deviceId 0
            if (!_volsDao.findByInstanceAndDeviceId(vm.getId(), 0).isEmpty()) {
                throw new InvalidParameterValueException("VM already has a root volume attached to it");
            }
            // volume can't be in Uploaded state
            if (volumeToAttach.getState() == Volume.State.Uploaded) {
                throw new InvalidParameterValueException("No support for Root volume attach in state " + Volume.State.Uploaded);
            }
        }
    }
    // Check that the number of data volumes attached to the VM is less than that supported by the hypervisor
    if (deviceId == null || deviceId.longValue() != 0) {
        final List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
        final int maxDataVolumesSupported = getMaxDataVolumesSupported(vm);
        if (existingDataVolumes.size() >= maxDataVolumesSupported) {
            throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxDataVolumesSupported + "). Please specify another VM.");
        }
    }
    // If local storage is disabled then attaching a volume with a local disk offering is not allowed
    final DataCenterVO dataCenter = _dcDao.findById(volumeToAttach.getDataCenterId());
    if (!dataCenter.isLocalStorageEnabled()) {
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId());
        if (diskOffering.getUseLocalStorage()) {
            throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it");
        }
    }
    // Check that the target VM does not have associated VM snapshots
    final List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
    if (vmSnapshots.size() > 0) {
        throw new InvalidParameterValueException("Unable to attach volume, please specify a VM that does not have VM snapshots");
    }
    // permission check
    _accountMgr.checkAccess(caller, null, true, volumeToAttach, vm);
    if (!(Volume.State.Allocated.equals(volumeToAttach.getState()) || Volume.State.Ready.equals(volumeToAttach.getState()) || Volume.State.Uploaded.equals(volumeToAttach.getState()))) {
        throw new InvalidParameterValueException("Volume must be in Allocated, Ready or Uploaded state");
    }
    final Account owner = _accountDao.findById(volumeToAttach.getAccountId());
    if (!(volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Ready)) {
        try {
            _resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize());
        } catch (final ResourceAllocationException e) {
            s_logger.error("primary storage resource limit check failed", e);
            throw new InvalidParameterValueException(e.getMessage());
        }
    }
    final HypervisorType rootDiskHyperType = vm.getHypervisorType();
    final HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
    final StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
    // only perform this check if the volume's storage pool is not null and not managed
    if (volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged()) {
        if (volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
            throw new InvalidParameterValueException("Can't attach a volume created by: " + volumeToAttachHyperType + " to a " + rootDiskHyperType + " vm");
        }
    }
    final AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (asyncExecutionContext != null) {
        final AsyncJob job = asyncExecutionContext.getJob();
        if (s_logger.isInfoEnabled()) {
            s_logger.info("Trying to attach volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
        }
        _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
    }
    final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        // avoid re-entrance
        final VmWorkJobVO placeHolder;
        placeHolder = createPlaceHolderWork(vmId);
        try {
            return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
        } finally {
            _workJobDao.expunge(placeHolder.getId());
        }
    } else {
        final Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId);
        Volume vol = null;
        try {
            outcome.get();
        } catch (final InterruptedException e) {
            throw new RuntimeException("Operation is interrupted", e);
        } catch (final java.util.concurrent.ExecutionException e) {
            throw new RuntimeException("Execution exception", e);
        }
        final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
        if (jobResult != null) {
            if (jobResult instanceof ConcurrentOperationException) {
                throw (ConcurrentOperationException) jobResult;
            } else if (jobResult instanceof InvalidParameterValueException) {
                throw (InvalidParameterValueException) jobResult;
            } else if (jobResult instanceof RuntimeException) {
                throw (RuntimeException) jobResult;
            } else if (jobResult instanceof Throwable) {
                throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
            } else if (jobResult instanceof Long) {
                vol = _volsDao.findById((Long) jobResult);
            }
        }
        return vol;
    }
}
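When attachVolumeToVM goes through the job queue, the unmarshalled job result is either an exception recorded by the worker, which is re-thrown to the caller, or the id of the attached volume, which is looked up via _volsDao.findById. The helper below is a hypothetical restatement of that branching: JobResultSketch and rethrowOrId are invented names, and unlike the real code it collapses the concrete exception types into RuntimeException/Throwable checks instead of naming them explicitly.

// Hypothetical helper mirroring the jobResult handling in attachVolumeToVM.
// The unmarshalled result is either an exception recorded by the work job,
// a Long id of the entity the job produced, or null.
final class JobResultSketch {

    private JobResultSketch() {
    }

    /** Re-throws a result that is an exception; otherwise returns it as a Long id (or null). */
    static Long rethrowOrId(final Object jobResult) {
        if (jobResult == null) {
            return null;
        }
        if (jobResult instanceof RuntimeException) {
            throw (RuntimeException) jobResult;
        }
        if (jobResult instanceof Throwable) {
            throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
        }
        if (jobResult instanceof Long) {
            return (Long) jobResult;
        }
        return null;
    }

    public static void main(final String[] args) {
        System.out.println(rethrowOrId(7L));   // prints 7
        System.out.println(rethrowOrId(null)); // prints null
    }
}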
Use of com.cloud.framework.jobs.AsyncJobExecutionContext in project cosmic by MissionCriticalCloud.
The class VolumeApiServiceImpl, method migrateVolume:
@DB
@Override
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_MIGRATE, eventDescription = "migrating volume", async = true)
public Volume migrateVolume(final MigrateVolumeCmd cmd) {
    final Long volumeId = cmd.getVolumeId();
    final Long storagePoolId = cmd.getStoragePoolId();
    final VolumeVO vol = _volsDao.findById(volumeId);
    if (vol == null) {
        throw new InvalidParameterValueException("Failed to find the volume id: " + volumeId);
    }
    if (vol.getState() != Volume.State.Ready) {
        throw new InvalidParameterValueException("Volume must be in ready state");
    }
    boolean liveMigrateVolume = false;
    final Long instanceId = vol.getInstanceId();
    Long srcClusterId = null;
    VMInstanceVO vm = null;
    if (instanceId != null) {
        vm = _vmInstanceDao.findById(instanceId);
    }
    // Check that the VM to which this volume is attached does not have VM snapshots
    if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) {
        throw new InvalidParameterValueException("Volume cannot be migrated, please remove all VM snapshots for the VM to which this volume is attached");
    }
    if (vm != null && vm.getState() == State.Running) {
        // Check if the VM is GPU enabled.
        if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
            throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
        }
        // Check if the underlying hypervisor supports storage motion.
        final Long hostId = vm.getHostId();
        if (hostId != null) {
            final HostVO host = _hostDao.findById(hostId);
            HypervisorCapabilitiesVO capabilities = null;
            if (host != null) {
                capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion());
                srcClusterId = host.getClusterId();
            }
            if (capabilities != null) {
                liveMigrateVolume = capabilities.isStorageMotionSupported();
            }
        }
        // If the VM is running and the hypervisor doesn't support live migration, then return an error
        if (!liveMigrateVolume) {
            throw new InvalidParameterValueException("Volume needs to be detached from VM");
        }
    }
    if (liveMigrateVolume && !cmd.isLiveMigrate()) {
        throw new InvalidParameterValueException("The volume " + vol + " is attached to a vm and for migrating it the parameter livemigrate should be specified");
    }
    final StoragePool destPool = (StoragePool) dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
    if (destPool == null) {
        throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId);
    } else if (destPool.isInMaintenance()) {
        throw new InvalidParameterValueException("Cannot migrate volume " + vol + " to the destination storage pool " + destPool.getName() + " as the storage pool is in maintenance mode.");
    }
    if (vol.getPoolId() == destPool.getId()) {
        throw new InvalidParameterValueException("Cannot migrate to the same storage pool the volume is already residing on.");
    }
    if (_volumeMgr.volumeOnSharedStoragePool(vol)) {
        if (destPool.isLocal()) {
            throw new InvalidParameterValueException("Migration of volume from shared to local storage pool is not supported");
        } else {
            // Make sure that the destination storage pool is in the same cluster as the VM.
            if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) {
                if (!srcClusterId.equals(destPool.getClusterId())) {
                    throw new InvalidParameterValueException("Cannot live migrate a volume of a virtual machine to a storage pool in a different cluster. " + "You can live migrate the virtual machine itself, rather than the volume.");
                }
            }
        }
    } else {
        throw new InvalidParameterValueException("Migration of volume from local storage pool is not supported");
    }
    if (vm != null) {
        // serialize VM operation
        final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
            // avoid re-entrance
            final VmWorkJobVO placeHolder;
            placeHolder = createPlaceHolderWork(vm.getId());
            try {
                return orchestrateMigrateVolume(vol.getId(), destPool.getId(), liveMigrateVolume);
            } finally {
                _workJobDao.expunge(placeHolder.getId());
            }
        } else {
            final Outcome<Volume> outcome = migrateVolumeThroughJobQueue(vm.getId(), vol.getId(), destPool.getId(), liveMigrateVolume);
            try {
                outcome.get();
            } catch (final InterruptedException e) {
                throw new RuntimeException("Operation is interrupted", e);
            } catch (final java.util.concurrent.ExecutionException e) {
                throw new RuntimeException("Execution exception", e);
            }
            final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
            if (jobResult != null) {
                if (jobResult instanceof ConcurrentOperationException) {
                    throw (ConcurrentOperationException) jobResult;
                } else if (jobResult instanceof RuntimeException) {
                    throw (RuntimeException) jobResult;
                } else if (jobResult instanceof Throwable) {
                    throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
                }
            }
            // retrieve the migrated new volume from the job result
            if (jobResult != null && jobResult instanceof Long) {
                return _entityMgr.findById(VolumeVO.class, (Long) jobResult);
            }
            return null;
        }
    }
    return orchestrateMigrateVolume(vol.getId(), destPool.getId(), liveMigrateVolume);
}
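Before any migration work is dispatched, migrateVolume rejects destination pools that are in maintenance, identical to the pool the volume already resides on, local (in either direction, since only shared-to-shared migration is supported here), or in a different cluster when a live migration is requested. The sketch below restates those checks as a small pure function under invented types; PoolView, validateDestination and MigrationPreChecksSketch are illustrative names, not part of cosmic.

// Hypothetical distillation of the destination-pool checks in migrateVolume.
// PoolView is an invented value type; cosmic works with StoragePool / VolumeVO instead.
final class MigrationPreChecksSketch {

    /** Minimal view of a storage pool for the purpose of these checks. */
    static final class PoolView {
        final long id;
        final boolean local;
        final boolean inMaintenance;
        final Long clusterId;

        PoolView(final long id, final boolean local, final boolean inMaintenance, final Long clusterId) {
            this.id = id;
            this.local = local;
            this.inMaintenance = inMaintenance;
            this.clusterId = clusterId;
        }
    }

    private MigrationPreChecksSketch() {
    }

    /** Mirrors the destination-pool checks in migrateVolume; throws on an invalid target. */
    static void validateDestination(final PoolView source, final PoolView dest,
                                    final boolean liveMigrate, final Long srcClusterId) {
        if (dest.inMaintenance) {
            throw new IllegalArgumentException("Destination storage pool is in maintenance mode");
        }
        if (source.id == dest.id) {
            throw new IllegalArgumentException("Cannot migrate to the pool the volume already resides on");
        }
        if (source.local) {
            throw new IllegalArgumentException("Migration of a volume from a local storage pool is not supported");
        }
        if (dest.local) {
            throw new IllegalArgumentException("Migration of a volume from shared to local storage is not supported");
        }
        if (liveMigrate && dest.clusterId != null && srcClusterId != null && !srcClusterId.equals(dest.clusterId)) {
            throw new IllegalArgumentException("Cannot live migrate a volume to a storage pool in a different cluster");
        }
    }

    public static void main(final String[] args) {
        final PoolView src = new PoolView(1L, false, false, 10L);
        final PoolView dst = new PoolView(2L, false, false, 10L);
        validateDestination(src, dst, true, 10L); // shared pools, same cluster: accepted
        System.out.println("destination pool accepted");
    }
}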