Use of org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext in project cloudstack by apache.
The class VolumeApiServiceImpl, method takeSnapshot.
@Override
@ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "taking snapshot", async = true)
public Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType) throws ResourceAllocationException {
    VolumeInfo volume = volFactory.getVolume(volumeId);
    if (volume == null) {
        throw new InvalidParameterValueException("Creating snapshot failed because volume " + volumeId + " does not exist");
    }
    if (volume.getState() != Volume.State.Ready) {
        throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
    }
    StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId());
    if (storagePoolVO.isManaged() && locationType == null) {
        locationType = Snapshot.LocationType.PRIMARY;
    }
    VMInstanceVO vm = null;
    if (volume.getInstanceId() != null) {
        vm = _vmInstanceDao.findById(volume.getInstanceId());
    }
    if (vm != null) {
        // serialize VM operation
        AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
            // avoid re-entrance: already inside a VM work job, so orchestrate directly under a placeholder work record
            VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId());
            try {
                return orchestrateTakeVolumeSnapshot(volumeId, policyId, snapshotId, account, quiescevm, locationType);
            } finally {
                _workJobDao.expunge(placeHolder.getId());
            }
        } else {
            Outcome<Snapshot> outcome = takeVolumeSnapshotThroughJobQueue(vm.getId(), volumeId, policyId, snapshotId, account.getId(), quiescevm, locationType);
            try {
                outcome.get();
            } catch (InterruptedException e) {
                throw new RuntimeException("Operation is interrupted", e);
            } catch (java.util.concurrent.ExecutionException e) {
                throw new RuntimeException("Execution exception", e);
            }
            Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
            if (jobResult != null) {
                if (jobResult instanceof ConcurrentOperationException) {
                    throw (ConcurrentOperationException) jobResult;
                } else if (jobResult instanceof ResourceAllocationException) {
                    throw (ResourceAllocationException) jobResult;
                } else if (jobResult instanceof Throwable) {
                    throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
                }
            }
            return _snapshotDao.findById(snapshotId);
        }
    } else {
        CreateSnapshotPayload payload = new CreateSnapshotPayload();
        payload.setSnapshotId(snapshotId);
        payload.setSnapshotPolicyId(policyId);
        payload.setAccount(account);
        payload.setQuiescevm(quiescevm);
        volume.addPayload(payload);
        return volService.takeSnapshot(volume);
    }
}
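The branch on jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER) above is the recurring CloudStack pattern for serializing operations against a VM: run the orchestration directly when the call is already inside a VM work job, otherwise queue a new work job and wait on its Outcome. The following is a minimal, hypothetical sketch of that skeleton only; SomeResult, doVmWork and vmWorkThroughJobQueue are placeholder names, not actual CloudStack members.

// Hypothetical sketch of the VM-work serialization pattern; doVmWork() and
// vmWorkThroughJobQueue() are placeholders, not real CloudStack methods.
public SomeResult someVmOperation(long vmId) {
    AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        // already inside a VM work job: execute directly under a placeholder work record
        VmWorkJobVO placeHolder = createPlaceHolderWork(vmId);
        try {
            return doVmWork(vmId);
        } finally {
            _workJobDao.expunge(placeHolder.getId());
        }
    } else {
        // called from the API layer: queue a VM work job and wait for its outcome
        Outcome<SomeResult> outcome = vmWorkThroughJobQueue(vmId);
        try {
            outcome.get();
        } catch (InterruptedException | java.util.concurrent.ExecutionException e) {
            throw new RuntimeException("Failed while waiting for the VM work job", e);
        }
        return (SomeResult) _jobMgr.unmarshallResultObject(outcome.getJob());
    }
}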
Use of org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext in project cloudstack by apache.
The class AsyncJobManagerImpl, method joinJob.
@Override
@DB
public void joinJob(long jobId, long joinJobId, String wakeupHandler, String wakeupDispatcher, String[] wakeupTopcisOnMessageBus, long wakeupIntervalInMilliSeconds, long timeoutInMilliSeconds) {
    Long syncSourceId = null;
    AsyncJobExecutionContext context = AsyncJobExecutionContext.getCurrentExecutionContext();
    assert (context.getJob() != null);
    if (context.getJob().getSyncSource() != null) {
        syncSourceId = context.getJob().getSyncSource().getQueueId();
    }
    _joinMapDao.joinJob(jobId, joinJobId, getMsid(), wakeupIntervalInMilliSeconds, timeoutInMilliSeconds, syncSourceId, wakeupHandler, wakeupDispatcher);
}
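Here the AsyncJobExecutionContext is consulted only to pick up the sync-source queue id of the currently running job; the join itself is recorded through _joinMapDao. A hedged caller sketch, assuming _jobMgr is an injected AsyncJobManager and that the wakeup handler/dispatcher names and otherJobId are placeholders:

// Hypothetical caller: join the currently running job to another job, polling every
// 3 seconds and giving up after 2 minutes. otherJobId is assumed to be the id of the
// job being waited on; the handler and dispatcher names are made up for illustration.
AsyncJob currentJob = AsyncJobExecutionContext.getCurrentExecutionContext().getJob();
_jobMgr.joinJob(currentJob.getId(), otherJobId, "SampleWakeupHandler", "SampleWakeupDispatcher",
        null, 3000L, 120000L);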
Use of org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext in project cloudstack by apache.
The class AsyncJobManagerImpl, method getExecutorRunnable.
private Runnable getExecutorRunnable(final AsyncJob job) {
    return new ManagedContextRunnable() {

        @Override
        public void run() {
            // register place-holder context to avoid installing system account call context
            if (CallContext.current() == null) {
                CallContext.registerPlaceHolderContext();
            }
            String related = job.getRelated();
            String logContext = job.getShortUuid();
            if (related != null && !related.isEmpty()) {
                NDC.push("job-" + related + "/" + "job-" + job.getId());
                AsyncJob relatedJob = _jobDao.findByIdIncludingRemoved(Long.parseLong(related));
                if (relatedJob != null) {
                    logContext = relatedJob.getShortUuid();
                }
            } else {
                NDC.push("job-" + job.getId());
            }
            MDC.put("logcontextid", logContext);
            try {
                super.run();
            } finally {
                NDC.pop();
            }
        }

        @Override
        protected void runInContext() {
            long runNumber = getJobRunNumber();
            try {
                try {
                    JmxUtil.registerMBean("AsyncJobManager", "Active Job " + job.getId(), new AsyncJobMBeanImpl(job));
                } catch (Exception e) {
                    // registration is expected to fail in some situations
                    if (s_logger.isTraceEnabled()) {
                        s_logger.trace("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e));
                    }
                }
                _jobMonitor.registerActiveTask(runNumber, job.getId());
                AsyncJobExecutionContext.setCurrentExecutionContext(new AsyncJobExecutionContext(job));
                String related = job.getRelated();
                String logContext = job.getShortUuid();
                if (related != null && !related.isEmpty()) {
                    AsyncJob relatedJob = _jobDao.findByIdIncludingRemoved(Long.parseLong(related));
                    if (relatedJob != null) {
                        logContext = relatedJob.getShortUuid();
                    }
                }
                MDC.put("logcontextid", logContext);
                // execute the job
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Executing " + StringUtils.cleanString(job.toString()));
                }
                if ((getAndResetPendingSignals(job) & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0) {
                    AsyncJobDispatcher jobDispatcher = findWakeupDispatcher(job);
                    if (jobDispatcher != null) {
                        jobDispatcher.runJob(job);
                    } else {
                        // TODO, job wakeup is not in use yet
                        if (s_logger.isTraceEnabled()) {
                            s_logger.trace("Unable to find a wakeup dispatcher from the joined job: " + job);
                        }
                    }
                } else {
                    AsyncJobDispatcher jobDispatcher = getDispatcher(job.getDispatcher());
                    if (jobDispatcher != null) {
                        jobDispatcher.runJob(job);
                    } else {
                        s_logger.error("Unable to find job dispatcher, job will be cancelled");
                        completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null);
                    }
                }
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Done executing " + job.getCmd() + " for job-" + job.getId());
                }
            } catch (Throwable e) {
                s_logger.error("Unexpected exception", e);
                completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null);
            } finally {
                // guard the final clause as well
                try {
                    if (job.getSyncSource() != null) {
                        // check the queue item one more time to make sure it is removed in case of any uncaught exception
                        _queueMgr.purgeItem(job.getSyncSource().getId());
                    }
                    try {
                        JmxUtil.unregisterMBean("AsyncJobManager", "Active Job " + job.getId());
                    } catch (Exception e) {
                        // unregistration is expected to fail in some situations
                        if (s_logger.isTraceEnabled()) {
                            s_logger.trace("Unable to unregister job " + job.getId() + " from JMX monitoring due to exception " + ExceptionUtil.toString(e));
                        }
                    }
                    // clean up the execution environment
                    AsyncJobExecutionContext.unregister();
                    _jobMonitor.unregisterActiveTask(runNumber);
                } catch (Throwable e) {
                    s_logger.error("Double exception", e);
                }
            }
        }
    };
}
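Stripped of the logging and JMX bookkeeping, the essential contract the runnable enforces is: install the AsyncJobExecutionContext for the worker thread before dispatching the job, and always unregister it afterwards so the thread-local context does not leak into the next job run on the same thread. A condensed, hypothetical sketch of just that lifecycle, where runJobWithDispatcher stands in for the dispatcher lookup and invocation above:

// Condensed sketch of the context lifecycle around job execution.
AsyncJobExecutionContext.setCurrentExecutionContext(new AsyncJobExecutionContext(job));
try {
    runJobWithDispatcher(job);   // placeholder for the dispatcher lookup/runJob calls above
} finally {
    // always clear the thread-local context, even if the dispatcher threw
    AsyncJobExecutionContext.unregister();
}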
Use of org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext in project cloudstack by apache.
The class VirtualMachineManagerImpl, method restoreVirtualMachine.
@Override
public UserVm restoreVirtualMachine(final long vmId, final Long newTemplateId) throws ResourceUnavailableException, InsufficientCapacityException {
    final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        VmWorkJobVO placeHolder = createPlaceHolderWork(vmId);
        try {
            return orchestrateRestoreVirtualMachine(vmId, newTemplateId);
        } finally {
            if (placeHolder != null) {
                _workJobDao.expunge(placeHolder.getId());
            }
        }
    } else {
        final Outcome<VirtualMachine> outcome = restoreVirtualMachineThroughJobQueue(vmId, newTemplateId);
        retrieveVmFromJobOutcome(outcome, String.valueOf(vmId), "restoreVirtualMachine");
        Object jobResult = retrieveResultFromJobOutcomeAndThrowExceptionIfNeeded(outcome);
        if (jobResult != null && jobResult instanceof HashMap) {
            HashMap<Long, String> passwordMap = (HashMap<Long, String>) jobResult;
            UserVmVO userVm = _userVmDao.findById(vmId);
            userVm.setPassword(passwordMap.get(vmId));
            return userVm;
        }
        throw new RuntimeException("Unexpected job execution result");
    }
}
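From the caller's point of view the queuing machinery is invisible: the API layer calls the manager and receives a UserVm once the work job completes and its password map has been applied. A hypothetical caller sketch, assuming virtualMachineManager is an injected reference and the ids are placeholders:

// Hypothetical usage; vmId and newTemplateId are placeholders.
try {
    UserVm restoredVm = virtualMachineManager.restoreVirtualMachine(vmId, newTemplateId);
} catch (ResourceUnavailableException | InsufficientCapacityException e) {
    throw new RuntimeException("Failed to restore VM " + vmId, e);
}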
Use of org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext in project cloudstack by apache.
The class VirtualMachineManagerImpl, method removeNicFromVm.
@Override
public boolean removeNicFromVm(final VirtualMachine vm, final Nic nic) throws ConcurrentOperationException, ResourceUnavailableException {
    final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId());
        try {
            return orchestrateRemoveNicFromVm(vm, nic);
        } finally {
            if (placeHolder != null) {
                _workJobDao.expunge(placeHolder.getId());
            }
        }
    } else {
        final Outcome<VirtualMachine> outcome = removeNicFromVmThroughJobQueue(vm, nic);
        retrieveVmFromJobOutcome(outcome, vm.getUuid(), "removeNicFromVm");
        try {
            Object jobResult = retrieveResultFromJobOutcomeAndThrowExceptionIfNeeded(outcome);
            if (jobResult != null && jobResult instanceof Boolean) {
                return (Boolean) jobResult;
            }
        } catch (InsufficientCapacityException ex) {
            throw new RuntimeException("Unexpected exception", ex);
        }
        throw new RuntimeException("Job failed with an unhandled exception");
    }
}
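The NIC removal follows the same two-path shape but unwraps a Boolean job result instead of an entity. A hypothetical caller sketch, assuming vm and nic have already been looked up and that the declared checked exceptions are left to propagate:

// Hypothetical usage; vm and nic are assumed to be resolved beforehand.
boolean nicRemoved = virtualMachineManager.removeNicFromVm(vm, nic);
if (!nicRemoved) {
    s_logger.warn("NIC " + nic.getId() + " was not removed from VM " + vm.getUuid());
}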