Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportUpdateCompleter in project coprhd-controller by CoprHD:
class ComputeSystemControllerImpl, method updateExportGroup.
/**
 * Workflow step that updates an export group with a new volume map and changed
 * compute resources (clusters, hosts, initiators).
 * <p>
 * Computes the block objects to add/remove from the new volume map, records a task
 * operation on the export group, then delegates the actual update to the
 * {@link BlockExportController}. On failure, the pending task is completed with an
 * error and the workflow step is failed.
 *
 * @param exportGroup       URI of the export group to update
 * @param newVolumesMap     desired map of block object URI to HLU for the group
 * @param addedClusters     clusters being added to the export group
 * @param removedClusters   clusters being removed from the export group
 * @param addedHosts        hosts being added to the export group
 * @param removedHosts      hosts being removed from the export group
 * @param addedInitiators   initiators being added to the export group
 * @param removedInitiators initiators being removed from the export group
 * @param stepId            workflow step id, also used as the task id
 * @throws Exception declared for workflow step compatibility; failures are handled
 *                   internally by completing the task and step with an error
 */
public void updateExportGroup(URI exportGroup, Map<URI, Integer> newVolumesMap, Set<URI> addedClusters, Set<URI> removedClusters, Set<URI> addedHosts, Set<URI> removedHosts, Set<URI> addedInitiators, Set<URI> removedInitiators, String stepId) throws Exception {
    Map<URI, Integer> addedBlockObjects = new HashMap<URI, Integer>();
    Map<URI, Integer> removedBlockObjects = new HashMap<URI, Integer>();
    try {
        ExportGroup exportGroupObject = _dbClient.queryObject(ExportGroup.class, exportGroup);
        // Diff the desired volume map against the current export group contents.
        ExportUtils.getAddedAndRemovedBlockObjects(newVolumesMap, exportGroupObject, addedBlockObjects, removedBlockObjects);
        BlockExportController blockController = getController(BlockExportController.class, BlockExportController.EXPORT);
        Operation op = _dbClient.createTaskOpStatus(ExportGroup.class, exportGroup, stepId, ResourceOperationTypeEnum.UPDATE_EXPORT_GROUP);
        exportGroupObject.getOpStatus().put(stepId, op);
        _dbClient.updateObject(exportGroupObject);
        // Test mechanism to invoke a failure. No-op on production systems.
        InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_026);
        blockController.exportGroupUpdate(exportGroup, addedBlockObjects, removedBlockObjects, addedClusters, removedClusters, addedHosts, removedHosts, addedInitiators, removedInitiators, stepId);
        // No code should be added following the call to the block controller to preserve rollback integrity
    } catch (Exception ex) {
        _log.error("Exception occurred while updating export group {}", exportGroup, ex);
        // Clean up any pending tasks
        ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(exportGroup, addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters, stepId);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
        // Fail the step
        WorkflowStepCompleter.stepFailed(stepId, DeviceControllerException.errors.jobFailed(ex));
    }
}
Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportUpdateCompleter in project coprhd-controller by CoprHD:
class BlockDeviceExportController, method exportGroupUpdate.
/**
 * Updates an export group across all affected storage systems.
 * <p>
 * Splits the added/removed block objects per storage system, builds a master
 * workflow with one sub-workflow per storage system, and executes it with an
 * {@link ExportUpdateCompleter} that records exactly which objects were added and
 * removed. If the resulting workflow has no steps, the completer is marked ready
 * immediately. A {@link LockRetryException} releases all workflow locks, marks a
 * freshly created workflow as completed (so the retry can reuse the task id), and
 * is rethrown; any other exception releases locks and completes the task with an
 * error.
 *
 * @param export                URI of the export group to update
 * @param addedBlockObjectMap   block object URI to HLU map of objects to add
 * @param removedBlockObjectMap block object URI to HLU map of objects to remove
 * @param addedClusters         clusters being added to the export group
 * @param removedClusters       clusters being removed from the export group
 * @param addedHosts            hosts being added to the export group
 * @param removedHosts          hosts being removed from the export group
 * @param addedInitiators       initiators being added to the export group
 * @param removedInitiators     initiators being removed from the export group
 * @param opId                  task id for the operation
 * @throws ControllerException on controller-level failures (LockRetryException is
 *                             propagated to allow the caller's retry mechanism)
 */
@Override
public void exportGroupUpdate(URI export, Map<URI, Integer> addedBlockObjectMap, Map<URI, Integer> removedBlockObjectMap, Set<URI> addedClusters, Set<URI> removedClusters, Set<URI> addedHosts, Set<URI> removedHosts, Set<URI> addedInitiators, Set<URI> removedInitiators, String opId) throws ControllerException {
    Map<URI, Map<URI, Integer>> addedStorageToBlockObjects = new HashMap<URI, Map<URI, Integer>>();
    Map<URI, Map<URI, Integer>> removedStorageToBlockObjects = new HashMap<URI, Map<URI, Integer>>();
    Workflow workflow = null;
    List<Workflow> workflowList = new ArrayList<>();
    try {
        computeDiffs(export, addedBlockObjectMap, removedBlockObjectMap, addedStorageToBlockObjects, removedStorageToBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters);
        // Generate a flat list of volume/snap objects that will be added
        // to the export update completer so the completer will know what
        // to add upon task completion. We need not carry the block controller
        // into the completer, so we strip that out of the map for the benefit of
        // keeping the completer simple.
        Map<URI, Integer> addedBlockObjects = new HashMap<>();
        for (Map<URI, Integer> added : addedStorageToBlockObjects.values()) {
            addedBlockObjects.putAll(added);
        }
        // Generate a flat list of volume/snap objects that will be removed
        // so the export update completer will know what to remove upon task
        // completion.
        Map<URI, Integer> removedBlockObjects = new HashMap<>();
        for (Map<URI, Integer> removed : removedStorageToBlockObjects.values()) {
            removedBlockObjects.putAll(removed);
        }
        // Construct the export update completer with exactly which objects will
        // be removed and added when it is complete.
        ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(export, addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters, opId);
        _log.info("Received request to update export group. Creating master workflow.");
        workflow = _wfUtils.newWorkflow("exportGroupUpdate", false, opId);
        _log.info("Task id {} and workflow uri {}", opId, workflow.getWorkflowURI());
        workflowList.add(workflow);
        for (Map.Entry<URI, Map<URI, Integer>> entry : addedStorageToBlockObjects.entrySet()) {
            URI storageUri = entry.getKey();
            _log.info("Creating sub-workflow for storage system {}", String.valueOf(storageUri));
            // TODO: Need to fix, getExportMask() returns a single mask,
            // but there could be more than 1 for a array and ExportGroup
            _wfUtils.generateExportGroupUpdateWorkflow(workflow, null, null, export, getExportMask(export, storageUri), entry.getValue(), removedStorageToBlockObjects.get(storageUri), new ArrayList<URI>(addedInitiators), new ArrayList<URI>(removedInitiators), storageUri, workflowList);
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateExportWorkflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
        } else {
            // Nothing to do on any storage system; complete the task immediately.
            taskCompleter.ready(_dbClient);
        }
    } catch (LockRetryException ex) {
        /**
         * Added this catch block to mark the current workflow as completed so that lock retry will not get exception while creating new
         * workflow using the same taskid.
         */
        _log.info(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
        // Release locks held by every workflow created so far before retrying.
        for (Workflow createdWorkflow : workflowList) {
            if (createdWorkflow != null) {
                boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(createdWorkflow);
                _log.info("Release locks from workflow {} status {}", createdWorkflow.getWorkflowURI(), status);
            }
        }
        if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
            com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
            if (!wf.getCompleted()) {
                _log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
                wf.setCompleted(true);
                _dbClient.updateObject(wf);
            }
        }
        // Rethrow so the lock-retry mechanism can re-dispatch with the same task id.
        throw ex;
    } catch (Exception ex) {
        // Generic failure: report the error through a fresh completer (the one built
        // above may not exist if the failure occurred before its construction).
        ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(export, opId);
        String message = "exportGroupUpdate caught an exception.";
        _log.error(message, ex);
        for (Workflow createdWorkflow : workflowList) {
            if (createdWorkflow != null) {
                boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(createdWorkflow);
                _log.info("Release locks from workflow {} status {}", createdWorkflow.getWorkflowURI(), status);
            }
        }
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
Aggregations