Usage of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller (by CoprHD): class BlockDeviceController, method restoreFromCloneStep.
/**
 * Workflow step that restores full-copy clone(s) back from their source.
 *
 * @param storage URI of the storage system hosting the clones
 * @param clones clone volume URIs; only the first is used for the non-CG path
 * @param updateOpStatus unused here; retained for interface compatibility
 * @param isCG true to restore the whole consistency group of clones
 * @param opId workflow step / task id
 * @return true when the restore request was dispatched, false on failure
 * @throws ControllerException declared for interface compatibility
 */
public boolean restoreFromCloneStep(URI storage, List<URI> clones, Boolean updateOpStatus, boolean isCG, String opId) throws ControllerException {
    TaskCompleter completer = null;
    try {
        StorageSystem storageDevice = _dbClient.queryObject(StorageSystem.class, storage);
        if (!isCG) {
            completer = new CloneRestoreCompleter(clones.get(0), opId);
            getDevice(storageDevice.getSystemType()).doRestoreFromClone(storageDevice, clones.get(0), completer);
        } else {
            // Assign to the shared 'completer' reference (the original used a
            // separate local here, leaving 'completer' null and causing an NPE
            // in the catch block whenever the group restore failed).
            completer = new CloneRestoreCompleter(clones, opId);
            getDevice(storageDevice.getSystemType()).doRestoreFromGroupClone(storageDevice, clones, completer);
        }
    } catch (Exception e) {
        // Include the exception so the stack trace is not lost.
        _log.error(String.format("restoreFromClone failed - storage: %s,clone: %s", storage.toString(), clones.get(0).toString()), e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        // Guard against an NPE when the failure happened before the completer
        // was constructed (e.g. queryObject threw).
        if (completer != null) {
            completer.error(_dbClient, serviceError);
        }
        doFailTask(Volume.class, clones, opId, serviceError);
        WorkflowStepCompleter.stepFailed(opId, serviceError);
        return false;
    }
    return true;
}
Usage of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller (by CoprHD): class BlockDeviceController, method createListSnapshot.
/**
 * Creates the given list of block snapshots on a single storage system.
 *
 * @param storage URI of the storage system to create the snapshots on
 * @param snapshotList URIs of the snapshot objects to create
 * @param createInactive whether the snapshots should be created inactive
 * @param readOnly accepted but not forwarded to the device call here
 *        — NOTE(review): presumably handled elsewhere; confirm against callers
 * @param opId workflow step / task id
 * @throws ControllerException when the completer could not be created
 */
public void createListSnapshot(URI storage, List<URI> snapshotList, Boolean createInactive, Boolean readOnly, String opId) throws ControllerException {
    WorkflowStepCompleter.stepExecuting(opId);
    TaskCompleter completer = null;
    try {
        StorageSystem storageObj = _dbClient.queryObject(StorageSystem.class, storage);
        completer = new BlockSnapshotCreateCompleter(snapshotList, opId);
        getDevice(storageObj.getSystemType()).doCreateListReplica(storageObj, snapshotList, createInactive, completer);
    } catch (Exception e) {
        // Guard clause: with no completer there is nothing to fail gracefully,
        // so surface the failure to the caller instead.
        if (completer == null) {
            throw DeviceControllerException.exceptions.createVolumeSnapshotFailed(e);
        }
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        WorkflowStepCompleter.stepFailed(opId, serviceError);
        completer.error(_dbClient, serviceError);
    }
}
Usage of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller (by CoprHD): class BlockDeviceController, method attachNativeContinuousCopies.
/**
 * Builds and executes a workflow that attaches native continuous copies
 * (mirrors) to the given source volume.
 *
 * @param storage URI of the storage system
 * @param sourceVolume URI of the volume being mirrored
 * @param mirrorList URIs of the mirror objects to attach
 * @param opId task id used to complete the operation
 * @throws ControllerException declared for interface compatibility
 */
@Override
public void attachNativeContinuousCopies(URI storage, URI sourceVolume, List<URI> mirrorList, String opId) throws ControllerException {
    _log.info("START attach continuous copies workflow");
    Workflow workflow = _workflowService.getNewWorkflow(this, ATTACH_MIRRORS_WF_NAME, true, opId);
    TaskCompleter taskCompleter = null;
    Volume sourceVolumeObj = _dbClient.queryObject(Volume.class, sourceVolume);
    boolean isCG = sourceVolumeObj.isInCG();
    try {
        // Create the completer BEFORE building the workflow steps: in the
        // original ordering a failure inside addStepsForCreateMirrors left
        // taskCompleter null, so the exception was only logged and the task
        // was never marked failed (it would hang from the caller's view).
        taskCompleter = new BlockMirrorTaskCompleter(BlockMirror.class, mirrorList, opId);
        addStepsForCreateMirrors(workflow, null, storage, sourceVolume, mirrorList, isCG);
        workflow.executePlan(taskCompleter, "Successfully attached continuous copies");
    } catch (Exception e) {
        String msg = String.format("Failed to execute attach continuous copies workflow for volume %s", sourceVolume);
        _log.error(msg, e);
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Usage of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller (by CoprHD): class XtremIOMaskingOrchestrator, method exportGroupDelete.
/**
 * Deletes all export masks associated with the given export group on the
 * specified XtremIO array, orchestrated as a workflow.
 *
 * @param storageURI URI of the XtremIO storage system
 * @param exportGroupURI URI of the export group to delete
 * @param token task id used to complete the orchestration task
 * @throws Exception wrapped as exportGroupDeleteFailed on any failure
 */
@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    // Hoisted so the catch block can fail the orchestration task; in the
    // original code an exception left the task completer un-notified, so the
    // task was never marked failed.
    TaskCompleter taskCompleter = null;
    try {
        log.info(String.format("exportGroupDelete start - Array: %s ExportGroup: %s", storageURI.toString(), exportGroupURI.toString()));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        // Nothing to delete: complete the task immediately.
        if (exportGroup == null || exportGroup.getInactive() || ExportMaskUtils.getExportMasks(_dbClient, exportGroup).isEmpty()) {
            taskCompleter.ready(_dbClient);
            return;
        }
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        // Set up workflow steps.
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token);
        String previousStep = null;
        refreshExportMask(storage, getDevice(), null);
        /**
         * TODO
         * Right now,to make orchestration simple , we decided not to share export masks across Export Groups.
         * But this rule is breaking an existing export Test case.
         * 1. If export mask is shared across export groups ,deleting an export mask means identifying the
         * right set of initiators and volumes to be removed from both the export Groups.
         */
        if (exportMasks != null && !exportMasks.isEmpty()) {
            for (ExportMask exportMask : exportMasks) {
                List<URI> initiators = StringSetUtil.stringSetToUriList(exportMask.getInitiators());
                List<URI> volumesInMask = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
                previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup, exportMask, volumesInMask, initiators, null);
            }
            previousStep = generateZoningDeleteWorkflow(workflow, previousStep, exportGroup, exportMasks);
        }
        String successMessage = String.format("Export was successfully removed from StorageArray %s", storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
        log.info(String.format("exportGroupDelete end - Array: %s ExportGroup: %s", storageURI.toString(), exportGroupURI.toString()));
    } catch (Exception e) {
        // Fail the task (if the completer was created) before rethrowing, so
        // the orchestration task does not dangle forever.
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, DeviceControllerException.errors.jobFailed(e));
        }
        throw DeviceControllerException.exceptions.exportGroupDeleteFailed(e);
    }
}
Usage of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller (by CoprHD): class HDSMetaVolumeOperations, method waitForAsyncHDSJob.
/**
 * Synchronously polls the given asynchronous HDS job until it reaches a
 * terminal state or the configured timeout elapses.
 *
 * @param storageDeviceURI URI of the storage system the job runs against
 * @param messageId async message id returned by the HDS API for the operation
 * @param job poller for the async operation; when null a placeholder job with
 *            a no-op completer is created (status is returned, not called back)
 * @param hdsApiFactory factory the job uses to talk to the HDS API
 * @return the terminal {@code JobStatus} (SUCCESS when the job completed)
 * @throws HDSException when the job times out or finishes unsuccessfully
 */
private JobStatus waitForAsyncHDSJob(URI storageDeviceURI, String messageId, HDSJob job, HDSApiFactory hdsApiFactory) throws HDSException {
    JobStatus status = JobStatus.IN_PROGRESS;
    if (job == null) {
        // No-op completer: this method reports status via its return value,
        // not through completer callbacks.
        TaskCompleter taskCompleter = new TaskCompleter() {
            @Override
            public void ready(DbClient dbClient) throws DeviceControllerException {
            }
            @Override
            public void error(DbClient dbClient, ServiceCoded serviceCoded) throws DeviceControllerException {
            }
            @Override
            protected void complete(DbClient dbClient, Operation.Status status, ServiceCoded coded) throws DeviceControllerException {
            }
        };
        job = new HDSJob(messageId, storageDeviceURI, taskCompleter, "");
    } else {
        job.setHDSJob(messageId);
    }
    JobContext jobContext = new JobContext(dbClient, null, null, hdsApiFactory, null, null, null, null);
    long startTime = System.currentTimeMillis();
    while (true) {
        JobPollResult result = job.poll(jobContext, SYNC_WRAPPER_WAIT);
        // ERROR is retried like IN_PROGRESS until the timeout elapses
        // (treated as possibly transient).
        if (result.getJobStatus().equals(JobStatus.IN_PROGRESS) || result.getJobStatus().equals(JobStatus.ERROR)) {
            if (System.currentTimeMillis() - startTime > SYNC_WRAPPER_TIME_OUT) {
                // BUG FIX: the exception was previously constructed but never
                // thrown, leaving this loop spinning forever after timeout.
                throw HDSException.exceptions.asyncTaskFailedTimeout(System.currentTimeMillis() - startTime);
            }
            try {
                Thread.sleep(SYNC_WRAPPER_WAIT);
            } catch (InterruptedException e) {
                log.error("Thread waiting for hds job to complete was interrupted and " + "will be resumed");
                // Restore the interrupt flag so callers can observe it.
                Thread.currentThread().interrupt();
            }
        } else {
            status = result.getJobStatus();
            if (!status.equals(JobStatus.SUCCESS)) {
                // BUG FIX: previously the failure exception was constructed
                // but never thrown, silently returning a non-success status.
                throw HDSException.exceptions.asyncTaskFailedWithErrorResponseWithoutErrorCode(messageId, result.getErrorDescription());
            }
            break;
        }
    }
    return status;
}
Aggregations