use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter in project coprhd-controller by CoprHD.
the class AbstractDefaultMaskingOrchestrator method generateExportMaskRemovePathsWorkflow.
/**
* Generate export mask remove path workflow step
*
* @param workflow - The workflow
* @param storage - Storage system
* @param exportGroupURI - Export group URI
* @param exportMaskURI - Export mask URI
* @param adjustedPaths - Adjusted paths
* @param removePaths - Paths to be removed
* @param previousStep - previous step that this step will wait for
* @return The created step id
* @throws Exception
*/
public String generateExportMaskRemovePathsWorkflow(Workflow workflow, StorageSystem storage, URI exportGroupURI, URI exportMaskURI, Map<URI, List<URI>> adjustedPaths, Map<URI, List<URI>> removePaths, String previousStep) throws Exception {
String maskingStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskRemovePathsCompleter(exportGroupURI, exportMaskURI, maskingStep);
Workflow.Method executeMethod = new Workflow.Method("doExportMaskRemovePaths", storage.getId(), exportGroupURI, exportMaskURI, adjustedPaths, removePaths, exportTaskCompleter);
maskingStep = workflow.createStep(EXPORT_MASK_REMOVE_PATHS_TASK, String.format("Removing paths from export mask %s", exportMaskURI.toString()), previousStep, storage.getId(), storage.getSystemType(), MaskingWorkflowEntryPoints.class, executeMethod, null, maskingStep);
return maskingStep;
}
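Worth noting in this snippet: the step id is allocated via createStepId() before the step exists, so the completer can be bound to it and handed to the execute method, and the step is then registered under that same id. Below is a minimal sketch of the pre-allocation pattern; MiniWorkflow and Completer are hypothetical reductions for illustration, not the CoprHD classes.

import java.util.UUID;

// Hypothetical reduction of the step-id pre-allocation pattern (not CoprHD classes).
public class StepIdPatternDemo {

    static class Completer {
        final String stepId;
        Completer(String stepId) { this.stepId = stepId; } // bound to a step that does not exist yet
    }

    static class MiniWorkflow {
        String createStepId() { return UUID.randomUUID().toString(); }
        String createStep(String description, String waitFor, String stepId, Completer completer) {
            System.out.println("registered step " + stepId + " (waits for " + waitFor + "): " + description);
            return stepId; // return the id so callers can chain later steps behind it
        }
    }

    public static void main(String[] args) {
        MiniWorkflow workflow = new MiniWorkflow();
        String maskingStep = workflow.createStepId();
        Completer completer = new Completer(maskingStep); // completer knows its step id up front
        maskingStep = workflow.createStep("Removing paths from export mask", null, maskingStep, completer);
        System.out.println("subsequent steps can wait on " + maskingStep);
    }
}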
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter in project coprhd-controller by CoprHD.
the class ComputeSystemControllerImpl method updateExportGroup.
public void updateExportGroup(URI exportGroup, Map<URI, Integer> newVolumesMap, Set<URI> addedClusters, Set<URI> removedClusters, Set<URI> addedHosts, Set<URI> removedHosts, Set<URI> addedInitiators, Set<URI> removedInitiators, String stepId) throws Exception {
Map<URI, Integer> addedBlockObjects = new HashMap<URI, Integer>();
Map<URI, Integer> removedBlockObjects = new HashMap<URI, Integer>();
try {
ExportGroup exportGroupObject = _dbClient.queryObject(ExportGroup.class, exportGroup);
ExportUtils.getAddedAndRemovedBlockObjects(newVolumesMap, exportGroupObject, addedBlockObjects, removedBlockObjects);
BlockExportController blockController = getController(BlockExportController.class, BlockExportController.EXPORT);
Operation op = _dbClient.createTaskOpStatus(ExportGroup.class, exportGroup, stepId, ResourceOperationTypeEnum.UPDATE_EXPORT_GROUP);
exportGroupObject.getOpStatus().put(stepId, op);
_dbClient.updateObject(exportGroupObject);
// Test mechanism to invoke a failure. No-op on production systems.
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_026);
blockController.exportGroupUpdate(exportGroup, addedBlockObjects, removedBlockObjects, addedClusters, removedClusters, addedHosts, removedHosts, addedInitiators, removedInitiators, stepId);
// No code should be added following the call to the block controller to preserve rollback integrity
} catch (Exception ex) {
_log.error("Exception occured while updating export group {}", exportGroup, ex);
// Clean up any pending tasks
ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(exportGroup, addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters, stepId);
ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
taskCompleter.error(_dbClient, serviceError);
// Fail the step
WorkflowStepCompleter.stepFailed(stepId, DeviceControllerException.errors.jobFailed(ex));
}
}
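ExportUtils.getAddedAndRemovedBlockObjects fills the two empty maps by diffing the requested volume map against the export group's current contents. Below is a hypothetical reduction of that diff; the real helper's exact semantics may differ, and the URIs are made up.

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

// Hypothetical reduction of an added/removed diff (not the actual ExportUtils code):
// entries requested but not currently in the group are "added"; entries currently
// in the group but absent from the request are "removed".
public class ExportDiffDemo {
    public static void main(String[] args) {
        Map<URI, Integer> requested = new HashMap<>();
        requested.put(URI.create("urn:storageos:Volume:vol-1"), 1); // volume -> HLU
        requested.put(URI.create("urn:storageos:Volume:vol-2"), 2);

        Map<URI, Integer> current = new HashMap<>();
        current.put(URI.create("urn:storageos:Volume:vol-2"), 2);
        current.put(URI.create("urn:storageos:Volume:vol-3"), 3);

        Map<URI, Integer> added = new HashMap<>();
        Map<URI, Integer> removed = new HashMap<>();
        requested.forEach((uri, hlu) -> { if (!current.containsKey(uri)) added.put(uri, hlu); });
        current.forEach((uri, hlu) -> { if (!requested.containsKey(uri)) removed.put(uri, hlu); });

        System.out.println("added:   " + added);   // vol-1 -> 1
        System.out.println("removed: " + removed); // vol-3 -> 3
    }
}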
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter in project coprhd-controller by CoprHD.
the class BlockDeviceExportController method exportGroupUpdate.
@Override
public void exportGroupUpdate(URI export, Map<URI, Integer> addedBlockObjectMap, Map<URI, Integer> removedBlockObjectMap, Set<URI> addedClusters, Set<URI> removedClusters, Set<URI> addedHosts, Set<URI> removedHosts, Set<URI> addedInitiators, Set<URI> removedInitiators, String opId) throws ControllerException {
Map<URI, Map<URI, Integer>> addedStorageToBlockObjects = new HashMap<URI, Map<URI, Integer>>();
Map<URI, Map<URI, Integer>> removedStorageToBlockObjects = new HashMap<URI, Map<URI, Integer>>();
Workflow workflow = null;
List<Workflow> workflowList = new ArrayList<>();
try {
computeDiffs(export, addedBlockObjectMap, removedBlockObjectMap, addedStorageToBlockObjects, removedStorageToBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters);
// Build a flat map of the volume/snapshot objects to be added and hand it
// to the export update completer so it knows what to add upon task completion.
// The completer does not need the per-storage-system keying, so we strip it
// out of the map to keep the completer simple.
Map<URI, Integer> addedBlockObjects = new HashMap<>();
for (URI storageUri : addedStorageToBlockObjects.keySet()) {
addedBlockObjects.putAll(addedStorageToBlockObjects.get(storageUri));
}
// Build a flat map of the volume/snapshot objects to be removed and hand it
// to the export update completer so it knows what to remove upon task completion.
Map<URI, Integer> removedBlockObjects = new HashMap<>();
for (URI storageUri : removedStorageToBlockObjects.keySet()) {
removedBlockObjects.putAll(removedStorageToBlockObjects.get(storageUri));
}
// Construct the export update completer with exactly which objects will
// be removed and added when it is complete.
ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(export, addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters, opId);
_log.info("Received request to update export group. Creating master workflow.");
workflow = _wfUtils.newWorkflow("exportGroupUpdate", false, opId);
_log.info("Task id {} and workflow uri {}", opId, workflow.getWorkflowURI());
workflowList.add(workflow);
for (URI storageUri : addedStorageToBlockObjects.keySet()) {
_log.info("Creating sub-workflow for storage system {}", String.valueOf(storageUri));
// TODO: Need to fix, getExportMask() returns a single mask,
// but there could be more than one per array and ExportGroup
_wfUtils.generateExportGroupUpdateWorkflow(workflow, null, null, export, getExportMask(export, storageUri), addedStorageToBlockObjects.get(storageUri), removedStorageToBlockObjects.get(storageUri), new ArrayList<>(addedInitiators), new ArrayList<>(removedInitiators), storageUri, workflowList);
}
if (!workflow.getAllStepStatus().isEmpty()) {
_log.info("The updateExportWorkflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
} else {
taskCompleter.ready(_dbClient);
}
} catch (LockRetryException ex) {
/**
* This catch block marks the current workflow as completed so that the lock
* retry will not hit an exception when it creates a new workflow with the
* same task id.
*/
_log.info(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
for (Workflow workflow2 : workflowList) {
if (workflow2 != null) {
boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow2);
_log.info("Release locks from workflow {} status {}", workflow2.getWorkflowURI(), status);
}
}
if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
if (!wf.getCompleted()) {
_log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
wf.setCompleted(true);
_dbClient.updateObject(wf);
}
}
throw ex;
} catch (Exception ex) {
ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(export, opId);
String message = "exportGroupUpdate caught an exception.";
_log.error(message, ex);
for (Workflow workflow2 : workflowList) {
if (workflow2 != null) {
boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow2);
_log.info("Release locks from workflow {} status {}", workflow2.getWorkflowURI(), status);
}
}
ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
taskCompleter.error(_dbClient, serviceError);
}
}
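The two flattening loops above collapse the per-storage-system maps produced by computeDiffs into the single volume-to-HLU maps the completer needs. A standalone illustration of that flattening, with made-up URIs:

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public class FlattenDemo {
    public static void main(String[] args) {
        // Outer key: storage system URI; inner map: block object URI -> HLU.
        Map<URI, Map<URI, Integer>> addedStorageToBlockObjects = new HashMap<>();
        Map<URI, Integer> array1 = new HashMap<>();
        array1.put(URI.create("urn:storageos:Volume:vol-1"), 1);
        Map<URI, Integer> array2 = new HashMap<>();
        array2.put(URI.create("urn:storageos:Volume:vol-2"), 2);
        addedStorageToBlockObjects.put(URI.create("urn:storageos:StorageSystem:array-1"), array1);
        addedStorageToBlockObjects.put(URI.create("urn:storageos:StorageSystem:array-2"), array2);

        // Strip the storage-system keying; the completer only needs volume -> HLU.
        Map<URI, Integer> addedBlockObjects = new HashMap<>();
        for (URI storageUri : addedStorageToBlockObjects.keySet()) {
            addedBlockObjects.putAll(addedStorageToBlockObjects.get(storageUri));
        }
        System.out.println(addedBlockObjects); // both volumes, no storage-system keys
    }
}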
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter in project coprhd-controller by CoprHD.
the class BlockDeviceExportController method exportGroupChangePortGroup.
@Override
public void exportGroupChangePortGroup(URI systemURI, URI exportGroupURI, URI newPortGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String opId) {
_log.info("Received request for change port group. Creating master workflow.");
ExportTaskCompleter taskCompleter = new ExportOrchestrationUpdateTaskCompleter(exportGroupURI, opId);
Workflow workflow = null;
try {
workflow = _wfUtils.newWorkflow("exportChangePortGroup", false, opId);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
if (exportGroup == null || exportGroup.getExportMasks() == null) {
_log.info("No export group or export mask");
taskCompleter.ready(_dbClient);
return;
}
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, systemURI);
if (exportMasks == null || exportMasks.isEmpty()) {
_log.info(String.format("No export mask found for this system %s", systemURI));
taskCompleter.ready(_dbClient);
return;
}
// Acquire all necessary locks for the workflow:
// For each export group lock initiator's hosts and storage array keys.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), systemURI);
boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
if (!acquiredLocks) {
_log.error("Change port group could not acquire locks");
ServiceError serviceError = DeviceControllerException.errors.jobFailedOpMsg("change port group", "Could not acquire workflow lock");
taskCompleter.error(_dbClient, serviceError);
return;
}
_wfUtils.generateExportGroupChangePortWorkflow(workflow, "change port group", exportGroupURI, newPortGroupURI, exportMaskURIs, waitForApproval);
if (!workflow.getAllStepStatus().isEmpty()) {
_log.info("The updateExportWorkflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception e) {
_log.error("Unexpected exception: ", e);
if (workflow != null) {
_wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow);
}
ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
taskCompleter.error(_dbClient, serviceError);
}
}
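The locking discipline here is the notable part: acquire all keys up front, error the completer and return if acquisition fails, and release every held lock when an exception prevents the plan from running. Below is a compile-clean skeleton of that discipline; LockService and Completer are hypothetical stand-ins, not the CoprHD WorkflowService API.

import java.util.List;

// Hypothetical skeleton of the acquire-or-fail / release-on-exception discipline above.
public class LockDisciplineDemo {

    interface LockService {
        boolean acquire(List<String> keys, int timeoutSeconds);
        void releaseAll();
    }

    interface Completer {
        void error(String reason);
        void ready();
    }

    static void run(LockService locks, Completer completer, List<String> lockKeys) {
        try {
            if (!locks.acquire(lockKeys, 60)) {
                completer.error("Could not acquire workflow lock"); // fail fast; nothing was acquired
                return;
            }
            // ... build the workflow steps and execute the plan; once the plan is
            // running, it is responsible for releasing the locks itself ...
            completer.ready();
        } catch (Exception e) {
            locks.releaseAll(); // the plan never ran, so release everything we hold
            completer.error(e.getMessage());
        }
    }
}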
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method changePortGroup.
@Override
public void changePortGroup(URI storageURI, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String token) {
ExportChangePortGroupCompleter taskCompleter = null;
try {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
taskCompleter = new ExportChangePortGroupCompleter(storageURI, exportGroupURI, token, portGroupURI);
logExportGroup(exportGroup, storageURI);
String workflowKey = "changePortGroup";
if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
return;
}
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), workflowKey, false, token);
if (CollectionUtils.isEmpty(exportMaskURIs)) {
_log.info("No export masks to change");
taskCompleter.ready(_dbClient);
return;
}
List<ExportMask> exportMasks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
String previousStep = null;
Set<URI> hostURIs = new HashSet<URI>();
for (ExportMask oldMask : exportMasks) {
// create a new masking view using the new port group
SmisStorageDevice device = (SmisStorageDevice) getDevice();
oldMask = device.refreshExportMask(storage, oldMask);
StringSet existingInits = oldMask.getExistingInitiators();
StringMap existingVols = oldMask.getExistingVolumes();
if (!CollectionUtils.isEmpty(existingInits)) {
String error = String.format("The export mask %s has unmanaged initiators %s", oldMask.getMaskName(), Joiner.on(',').join(existingInits));
_log.error(error);
ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
taskCompleter.error(_dbClient, serviceError);
return;
}
if (!CollectionUtils.isEmpty(existingVols)) {
String error = String.format("The export mask %s has unmanaged volumes %s", oldMask.getMaskName(), Joiner.on(',').join(existingVols.keySet()));
_log.error(error);
ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
taskCompleter.error(_dbClient, serviceError);
return;
}
InitiatorHelper initiatorHelper = new InitiatorHelper(StringSetUtil.stringSetToUriList(oldMask.getInitiators())).process(exportGroup);
List<String> initiatorNames = initiatorHelper.getPortNames();
List<URI> volumes = StringSetUtil.stringSetToUriList(oldMask.getVolumes().keySet());
ExportPathParams pathParams = _blockScheduler.calculateExportPathParamForVolumes(volumes, 0, storageURI, exportGroupURI);
pathParams.setStoragePorts(portGroup.getStoragePorts());
List<Initiator> initiators = ExportUtils.getExportMaskInitiators(oldMask, _dbClient);
List<URI> initURIs = new ArrayList<URI>();
for (Initiator init : initiators) {
if (!NullColumnValueGetter.isNullURI(init.getHost())) {
hostURIs.add(init.getHost());
}
initURIs.add(init.getId());
}
// Get impacted export groups
List<ExportGroup> impactedExportGroups = ExportMaskUtils.getExportGroups(_dbClient, oldMask);
List<URI> exportGroupURIs = URIUtil.toUris(impactedExportGroups);
_log.info("changePortGroup: exportMask {}, impacted export groups: {}", oldMask.getMaskName(), Joiner.on(',').join(exportGroupURIs));
Map<URI, List<URI>> assignments = _blockScheduler.assignStoragePorts(storage, exportGroup, initiators, null, pathParams, volumes, _networkDeviceController, exportGroup.getVirtualArray(), token);
// Try to find an existing export mask / masking view for the same host that
// already uses the new port group. If one is found, add the volumes from the
// current export mask to it; otherwise, create a new export mask/masking view
// with the same storage group and initiator group but the new port group,
// then delete the current export mask.
ExportMask newMask = device.findExportMasksForPortGroupChange(storage, initiatorNames, portGroupURI);
Map<URI, Integer> volumesToAdd = StringMapUtil.stringMapToVolumeMap(oldMask.getVolumes());
if (newMask != null) {
updateZoningMap(exportGroup, newMask, true);
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), newMask.getMaskName()));
previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, Arrays.asList(newMask), new ArrayList<URI>(volumesToAdd.keySet()));
String addVolumeStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskAddVolumeCompleter(exportGroupURI, newMask.getId(), volumesToAdd, addVolumeStep);
exportTaskCompleter.setExportGroups(exportGroupURIs);
Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportGroupAddVolumes", storageURI, exportGroupURI, newMask.getId(), volumesToAdd, null, exportTaskCompleter);
Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupAddVolumes", storageURI, exportGroupURI, exportGroupURIs, newMask.getId(), volumesToAdd, initURIs, addVolumeStep);
previousStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Adding volumes to mask %s (%s)", newMask.getMaskName(), newMask.getId().toString()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, addVolumeStep);
previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, newMask, volumesToAdd, null);
} else {
// No existing export mask / masking view was found, so create a new one.
// First construct the new export mask name: if the mask still has its original
// name, append the new port group name; if the current port group name is
// already appended, remove it and append the new one instead.
String oldName = oldMask.getMaskName();
URI oldPGURI = oldMask.getPortGroup();
if (oldPGURI != null) {
StoragePortGroup oldPG = _dbClient.queryObject(StoragePortGroup.class, oldPGURI);
if (oldPG != null) {
String pgName = oldPG.getLabel();
if (oldName.endsWith(pgName)) {
oldName = oldName.replaceAll(pgName, "");
}
}
}
String maskName = null;
if (oldName.endsWith("_")) {
maskName = String.format("%s%s", oldName, portGroup.getLabel());
} else {
maskName = String.format("%s_%s", oldName, portGroup.getLabel());
}
newMask = ExportMaskUtils.initializeExportMask(storage, exportGroup, initiators, volumesToAdd, getStoragePortsInPaths(assignments), assignments, maskName, _dbClient);
newMask.setPortGroup(portGroupURI);
List<BlockObject> vols = new ArrayList<BlockObject>();
for (URI boURI : volumesToAdd.keySet()) {
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
vols.add(bo);
}
newMask.addToUserCreatedVolumes(vols);
_dbClient.updateObject(newMask);
_log.info(String.format("Creating new exportMask %s", maskName));
// Make a new TaskCompleter for the exportStep. It has only one subtask.
// This is due to existing requirements in the doExportGroupCreate completion
// logic.
String maskingStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskChangePortGroupAddMaskCompleter(newMask.getId(), exportGroupURI, maskingStep);
exportTaskCompleter.setExportGroups(exportGroupURIs);
Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportChangePortGroupAddPaths", storageURI, exportGroupURI, newMask.getId(), oldMask.getId(), portGroupURI, exportTaskCompleter);
Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupCreate", storageURI, exportGroupURI, newMask.getId(), maskingStep);
maskingStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Create export mask(%s) to use port group %s", newMask.getMaskName(), portGroup.getNativeGuid()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, maskingStep);
String zoningStep = workflow.createStepId();
List<URI> masks = new ArrayList<URI>();
masks.add(newMask.getId());
previousStep = generateZoningCreateWorkflow(workflow, maskingStep, exportGroup, masks, volumesToAdd, zoningStep);
}
}
previousStep = _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
if (waitForApproval) {
// Insert a step that will be suspended. When it resumes, it will re-acquire the lock keys,
// which are released when the workflow suspends.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), storageURI);
String suspendMessage = "Adjust/rescan host/cluster paths. Press \"Resume\" to start removal of unnecessary paths. " + "\"Rollback\" will terminate the order and roll back.";
Workflow.Method method = WorkflowService.acquireWorkflowLocksMethod(lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
Workflow.Method rollbackNull = Workflow.NULL_METHOD;
previousStep = workflow.createStep("AcquireLocks", "Suspending for user verification of host/cluster connectivity.", previousStep, storage.getId(), storage.getSystemType(), WorkflowService.class, method, rollbackNull, waitForApproval, null);
workflow.setSuspendedStepMessage(previousStep, suspendMessage);
}
for (ExportMask exportMask : exportMasks) {
previousStep = generateChangePortGroupDeleteMaskWorkflowstep(storageURI, exportGroup, exportMask, previousStep, workflow);
}
_wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
if (!workflow.getAllStepStatus().isEmpty()) {
_log.info("The change port group workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
// update ExportChangePortGroupCompleter with affected export groups
Set<URI> affectedExportGroups = new HashSet<URI>();
for (ExportMask mask : exportMasks) {
List<ExportGroup> assocExportGroups = ExportMaskUtils.getExportGroups(_dbClient, mask);
for (ExportGroup eg : assocExportGroups) {
affectedExportGroups.add(eg.getId());
}
}
taskCompleter.setAffectedExportGroups(affectedExportGroups);
workflow.executePlan(taskCompleter, "Change port group successfully.");
_workflowService.markWorkflowBeenCreated(token, workflowKey);
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception e) {
_log.error("Export change port group Orchestration failed.", e);
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
taskCompleter.error(_dbClient, serviceError);
}
}
}
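The mask-renaming logic in the else branch (strip a trailing old port group label, then append the new label with a single underscore) is compact enough to exercise standalone. A small illustration with made-up names; note that replaceAll treats the label as a regex, exactly as the original does.

public class MaskNameDemo {

    // Mirrors the renaming in the else branch: drop a trailing old port group
    // label, then append the new label with a single underscore separator.
    static String newMaskName(String oldName, String oldPgLabel, String newPgLabel) {
        if (oldPgLabel != null && oldName.endsWith(oldPgLabel)) {
            oldName = oldName.replaceAll(oldPgLabel, ""); // regex semantics, as in the original
        }
        return oldName.endsWith("_")
                ? String.format("%s%s", oldName, newPgLabel)
                : String.format("%s_%s", oldName, newPgLabel);
    }

    public static void main(String[] args) {
        System.out.println(newMaskName("host1_PG1", "PG1", "PG2")); // host1_PG2
        System.out.println(newMaskName("host1", null, "PG2"));      // host1_PG2
    }
}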