Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class BlockDeviceExportController, method exportGroupChangePortGroup.
@Override
public void exportGroupChangePortGroup(URI systemURI, URI exportGroupURI, URI newPortGroupURI,
        List<URI> exportMaskURIs, boolean waitForApproval, String opId) {
    _log.info("Received request to change port group. Creating master workflow.");
    ExportTaskCompleter taskCompleter = new ExportOrchestrationUpdateTaskCompleter(exportGroupURI, opId);
    Workflow workflow = null;
    try {
        workflow = _wfUtils.newWorkflow("exportChangePortGroup", false, opId);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        if (exportGroup == null || exportGroup.getExportMasks() == null) {
            _log.info("No export group or export mask");
            taskCompleter.ready(_dbClient);
            return;
        }
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, systemURI);
        if (exportMasks == null || exportMasks.isEmpty()) {
            _log.info(String.format("No export mask found for this system %s", systemURI));
            taskCompleter.ready(_dbClient);
            return;
        }
        // Acquire all necessary locks for the workflow:
        // for each export group, lock the initiators' hosts and the storage array keys.
        List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient,
                ExportGroup.ExportGroupType.valueOf(exportGroup.getType()),
                StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), systemURI);
        boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(
                workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
        if (!acquiredLocks) {
            _log.error("Change port group could not acquire locks");
            ServiceError serviceError = DeviceControllerException.errors.jobFailedOpMsg(
                    "change port group", "Could not acquire workflow lock");
            taskCompleter.error(_dbClient, serviceError);
            return;
        }
        _wfUtils.generateExportGroupChangePortWorkflow(workflow, "change port group",
                exportGroupURI, newPortGroupURI, exportMaskURIs, waitForApproval);
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The change port group workflow has {} steps. Starting the workflow.",
                    workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Updated the export group on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception e) {
        _log.error("Unexpected exception: ", e);
        if (workflow != null) {
            // Release any locks the partially built workflow may hold.
            _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow);
        }
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        taskCompleter.error(_dbClient, serviceError);
    }
}
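The method above follows the controller's standard master-workflow shape: build the workflow, acquire locks, generate steps, then either execute the plan or complete the task immediately. The sketch below distills the final execute-or-complete decision into one helper. The Workflow and completer calls mirror the signatures visible in this snippet; the class WorkflowRunner and method runOrComplete are illustrative names, not part of CoprHD.

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.volumecontroller.TaskCompleter;
import com.emc.storageos.workflow.Workflow;

// Hypothetical helper: execute a fully built workflow, or finish the task
// immediately when no steps were generated (mirrors the pattern above).
public final class WorkflowRunner {
    private WorkflowRunner() {
    }

    public static void runOrComplete(Workflow workflow, TaskCompleter completer,
            DbClient dbClient, String successMessage) {
        if (!workflow.getAllStepStatus().isEmpty()) {
            // At least one step was generated; hand control to the workflow engine.
            workflow.executePlan(completer, successMessage);
        } else {
            // Nothing to do: mark the task as complete right away.
            completer.ready(dbClient);
        }
    }
}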
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class BlockDeviceExportController, method updatePolicyAndLimits.
@Override
public void updatePolicyAndLimits(List<URI> volumeURIs, URI newVpoolURI, String opId) throws ControllerException {
    _log.info("Received request to update Auto-tiering policy. Creating master workflow.");
    VolumeVpoolAutoTieringPolicyChangeTaskCompleter taskCompleter = null;
    URI oldVpoolURI = null;
    List<Volume> volumes = new ArrayList<Volume>();
    List<Volume> vplexBackendVolumes = new ArrayList<Volume>();
    try {
        // Read the volumes from the database, update the vPool to the new vPool,
        // update the new auto-tiering policy URI, and create the task completer.
        volumes = _dbClient.queryObject(Volume.class, volumeURIs);
        VirtualPool newVpool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
        Map<URI, URI> oldVolToPolicyMap = new HashMap<URI, URI>();
        for (Volume volume : volumes) {
            oldVpoolURI = volume.getVirtualPool();
            volume.setVirtualPool(newVpoolURI);
            _log.info(String.format("Changing VirtualPool Auto-tiering Policy for volume %s (%s) from %s to %s",
                    volume.getLabel(), volume.getId(), oldVpoolURI, newVpoolURI));
            oldVolToPolicyMap.put(volume.getId(), volume.getAutoTieringPolicyUri());
            updateAutoTieringPolicyUriInVolume(volume, newVpool);
            // Check if it is a VPLEX volume, and get its backend volumes.
            Volume backendSrc = VPlexUtil.getVPLEXBackendVolume(volume, true, _dbClient, false);
            if (backendSrc != null) {
                // Change the backend volume's vPool too.
                backendSrc.setVirtualPool(newVpoolURI);
                vplexBackendVolumes.add(backendSrc);
                _log.info(String.format(
                        "Changing VirtualPool Auto-tiering Policy for VPLEX backend source volume %s (%s) from %s to %s",
                        backendSrc.getLabel(), backendSrc.getId(), oldVpoolURI, newVpoolURI));
                oldVolToPolicyMap.put(backendSrc.getId(), backendSrc.getAutoTieringPolicyUri());
                updateAutoTieringPolicyUriInVolume(backendSrc, newVpool);
                // VPLEX volume: check if it is distributed.
                Volume backendHa = VPlexUtil.getVPLEXBackendVolume(volume, false, _dbClient, false);
                if (backendHa != null) {
                    VirtualPool newHAVpool = VirtualPool.getHAVPool(newVpool, _dbClient);
                    if (newHAVpool == null) {
                        // The HA vPool may not be set; fall back to the new vPool itself.
                        newHAVpool = newVpool;
                    }
                    backendHa.setVirtualPool(newHAVpool.getId());
                    vplexBackendVolumes.add(backendHa);
                    _log.info(String.format(
                            "Changing VirtualPool Auto-tiering Policy for VPLEX backend distributed volume %s (%s) from %s to %s",
                            backendHa.getLabel(), backendHa.getId(), oldVpoolURI, newHAVpool.getId()));
                    oldVolToPolicyMap.put(backendHa.getId(), backendHa.getAutoTieringPolicyUri());
                    updateAutoTieringPolicyUriInVolume(backendHa, newHAVpool);
                }
            }
        }
        _dbClient.updateObject(volumes);
        _dbClient.updateObject(vplexBackendVolumes);
        // The VolumeVpoolAutoTieringPolicyChangeTaskCompleter will restore the old virtual pool
        // and the old auto-tiering policy in the event of an error.
        // Assume all volumes belong to the same vPool; this should be taken care of by the BlockService API.
        taskCompleter = new VolumeVpoolAutoTieringPolicyChangeTaskCompleter(volumeURIs, oldVpoolURI, oldVolToPolicyMap, opId);
    } catch (Exception ex) {
        _log.error("Unexpected exception reading volume or generating taskCompleter: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volumeURIs, opId);
        completer.error(_dbClient, serviceError);
        // The failure has been recorded; return here so the workflow below
        // is not built with a null taskCompleter.
        return;
    }
    try {
        Workflow workflow = _wfUtils.newWorkflow("updateAutoTieringPolicy", false, opId);
        /**
         * For VMAX:
         * get the corresponding export mask for each volume,
         * group the volumes by export mask,
         * and create a workflow step for each export mask.
         *
         * For VNX Block:
         * the policy is set on the volume during its creation.
         * Whether it is exported or not, send all volumes
         * to update the StorageTierMethodology property on them.
         * Create a workflow step for each storage system.
         */
        // Use the backend volumes list if these are VPLEX volumes.
        List<Volume> volumesToUse = !vplexBackendVolumes.isEmpty() ? vplexBackendVolumes : volumes;
        // Move the applicable volumes from the all-volumes list to a separate list.
        Map<URI, List<URI>> systemToVolumeMap = getVolumesToModify(volumesToUse);
        String stepId = null;
        for (URI systemURI : systemToVolumeMap.keySet()) {
            stepId = _wfUtils.generateExportChangePolicyAndLimits(workflow, "updateAutoTieringPolicy", stepId,
                    systemURI, null, null, systemToVolumeMap.get(systemURI), newVpoolURI, oldVpoolURI);
        }
        Map<URI, List<URI>> storageToNotExportedVolumesMap = new HashMap<URI, List<URI>>();
        Map<URI, List<URI>> exportMaskToVolumeMap = new HashMap<URI, List<URI>>();
        Map<URI, URI> maskToGroupURIMap = new HashMap<URI, URI>();
        for (Volume volume : volumesToUse) {
            // Locate all the ExportMasks containing the given volume.
            Map<ExportMask, ExportGroup> maskToGroupMap = ExportUtils.getExportMasks(volume, _dbClient);
            if (maskToGroupMap.isEmpty()) {
                URI storageURI = volume.getStorageController();
                StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
                if (storage.checkIfVmax3()) {
                    if (!storageToNotExportedVolumesMap.containsKey(storageURI)) {
                        storageToNotExportedVolumesMap.put(storageURI, new ArrayList<URI>());
                    }
                    storageToNotExportedVolumesMap.get(storageURI).add(volume.getId());
                }
            }
            for (ExportMask mask : maskToGroupMap.keySet()) {
                if (!exportMaskToVolumeMap.containsKey(mask.getId())) {
                    exportMaskToVolumeMap.put(mask.getId(), new ArrayList<URI>());
                }
                exportMaskToVolumeMap.get(mask.getId()).add(volume.getId());
                maskToGroupURIMap.put(mask.getId(), maskToGroupMap.get(mask).getId());
            }
        }
        VirtualPool oldVpool = _dbClient.queryObject(VirtualPool.class, oldVpoolURI);
        for (URI exportMaskURI : exportMaskToVolumeMap.keySet()) {
            ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
            List<URI> exportMaskVolumes = exportMaskToVolumeMap.get(exportMaskURI);
            URI exportMaskNewVpool = newVpoolURI;
            URI exportMaskOldVpool = oldVpoolURI;
            Volume vol = _dbClient.queryObject(Volume.class, exportMaskVolumes.get(0));
            // All volumes have already been updated with their respective new vPool.
            if (Volume.checkForVplexBackEndVolume(_dbClient, vol) && !newVpoolURI.equals(vol.getVirtualPool())) {
                // Backend distributed volume: its HA vPool is set in the VPLEX vPool.
                exportMaskNewVpool = vol.getVirtualPool();
                VirtualPool oldHAVpool = VirtualPool.getHAVPool(oldVpool, _dbClient);
                if (oldHAVpool == null) {
                    // The HA vPool may not be set; fall back to the old vPool itself.
                    oldHAVpool = oldVpool;
                }
                exportMaskOldVpool = oldHAVpool.getId();
            }
            stepId = _wfUtils.generateExportChangePolicyAndLimits(workflow, "updateAutoTieringPolicy", stepId,
                    exportMask.getStorageDevice(), exportMaskURI, maskToGroupURIMap.get(exportMaskURI),
                    exportMaskVolumes, exportMaskNewVpool, exportMaskOldVpool);
        }
        for (URI storageURI : storageToNotExportedVolumesMap.keySet()) {
            stepId = _wfUtils.generateChangeAutoTieringPolicy(workflow,
                    "updateAutoTieringPolicyForNotExportedVMAX3Volumes", stepId, storageURI,
                    storageToNotExportedVolumesMap.get(storageURI), newVpoolURI, oldVpoolURI);
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateAutoTieringPolicy workflow has {} step(s). Starting the workflow.",
                    workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Updated the auto-tiering policy on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("Unexpected exception: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
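The per-system fan-out above depends on getVolumesToModify grouping volumes by the storage system that owns them, so that one workflow step is generated per system. A minimal sketch of that grouping is below; it assumes only that each volume reports its controller via getStorageController(), as in the snippet. The class VolumeGrouping is a hypothetical stand-in, and the real getVolumesToModify may apply additional filtering (the comment above its call site mentions moving only applicable volumes).

import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.emc.storageos.db.client.model.Volume;

// Hypothetical stand-in for getVolumesToModify(): bucket volume URIs by the
// storage system that owns them, so one workflow step can be generated per system.
public final class VolumeGrouping {
    private VolumeGrouping() {
    }

    public static Map<URI, List<URI>> groupByStorageSystem(List<Volume> volumes) {
        Map<URI, List<URI>> systemToVolumes = new HashMap<URI, List<URI>>();
        for (Volume volume : volumes) {
            URI systemURI = volume.getStorageController();
            List<URI> bucket = systemToVolumes.get(systemURI);
            if (bucket == null) {
                bucket = new ArrayList<URI>();
                systemToVolumes.put(systemURI, bucket);
            }
            bucket.add(volume.getId());
        }
        return systemToVolumes;
    }
}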
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class CephMaskingOrchestrator, method exportGroupDelete.
@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (masks != null && !masks.isEmpty()) {
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupDelete", true, token, taskCompleter);
            Map<URI, Integer> volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
            List<URI> volumeURIs = new ArrayList<>(volumeMap.keySet());
            List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
            Map<URI, Map<URI, Integer>> exportMaskToVolumeCount = ExportMaskUtils.mapExportMaskToVolumeShareCount(
                    _dbClient, volumeURIs, initiatorURIs);
            for (ExportMask exportMask : masks) {
                List<URI> exportGroupURIs = new ArrayList<>();
                if (!ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), exportGroupURIs)) {
                    _log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
                    generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask,
                            null, null, taskCompleter);
                } else {
                    Map<URI, Integer> volumeToExportGroupCount = exportMaskToVolumeCount.get(exportMask.getId());
                    List<URI> volumesToRemove = new ArrayList<>();
                    for (URI uri : volumeMap.keySet()) {
                        if (volumeToExportGroupCount == null) {
                            continue;
                        }
                        // Remove the volume only if it is not shared with
                        // more than 1 ExportGroup.
                        Integer numExportGroupsVolumeIsIn = volumeToExportGroupCount.get(uri);
                        if (numExportGroupsVolumeIsIn != null && numExportGroupsVolumeIsIn == 1) {
                            volumesToRemove.add(uri);
                        }
                    }
                    if (!volumesToRemove.isEmpty()) {
                        _log.info(String.format("Adding step to remove volumes %s from ExportMask %s",
                                Joiner.on(',').join(volumesToRemove), exportMask.getMaskName()));
                        generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask,
                                volumesToRemove, null, taskCompleter);
                    }
                }
            }
            String successMessage = String.format("ExportGroup delete successfully completed for StorageArray %s",
                    storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupDelete", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupDelete", ex.getMessage()));
    }
}
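The shared-mask branch above removes a volume only when exactly one ExportGroup references it, so that deleting it cannot break another export. The sketch below extracts that filter. It assumes the inner-map shape produced by mapExportMaskToVolumeShareCount in the snippet (per mask, a volume-to-export-group count); the class and method names are illustrative, not CoprHD APIs.

import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Hypothetical extraction of the filter used above: keep only volumes that are
// referenced by exactly one ExportGroup, so removing them is safe.
public final class SharedMaskVolumeFilter {
    private SharedMaskVolumeFilter() {
    }

    public static List<URI> volumesSafeToRemove(Set<URI> candidateVolumes,
            Map<URI, Integer> volumeToExportGroupCount) {
        List<URI> volumesToRemove = new ArrayList<URI>();
        if (volumeToExportGroupCount == null) {
            // No share counts for this mask; nothing can be proven safe to remove.
            return volumesToRemove;
        }
        for (URI volume : candidateVolumes) {
            Integer numExportGroups = volumeToExportGroupCount.get(volume);
            if (numExportGroups != null && numExportGroups == 1) {
                volumesToRemove.add(volume);
            }
        }
        return volumesToRemove;
    }
}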
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class VmaxMaskingOrchestrator, method changePortGroup.
@Override
public void changePortGroup(URI storageURI, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs,
        boolean waitForApproval, String token) {
    ExportChangePortGroupCompleter taskCompleter = null;
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
        taskCompleter = new ExportChangePortGroupCompleter(storageURI, exportGroupURI, token, portGroupURI);
        logExportGroup(exportGroup, storageURI);
        String workflowKey = "changePortGroup";
        if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
            return;
        }
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                workflowKey, false, token);
        if (CollectionUtils.isEmpty(exportMaskURIs)) {
            _log.info("No export masks to change");
            taskCompleter.ready(_dbClient);
            return;
        }
        List<ExportMask> exportMasks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
        String previousStep = null;
        Set<URI> hostURIs = new HashSet<URI>();
        for (ExportMask oldMask : exportMasks) {
            // Create a new masking view using the new port group.
            SmisStorageDevice device = (SmisStorageDevice) getDevice();
            oldMask = device.refreshExportMask(storage, oldMask);
            StringSet existingInits = oldMask.getExistingInitiators();
            StringMap existingVols = oldMask.getExistingVolumes();
            if (!CollectionUtils.isEmpty(existingInits)) {
                String error = String.format("The export mask %s has unmanaged initiators %s",
                        oldMask.getMaskName(), Joiner.on(',').join(existingInits));
                _log.error(error);
                ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
                taskCompleter.error(_dbClient, serviceError);
                return;
            }
            if (!CollectionUtils.isEmpty(existingVols)) {
                String error = String.format("The export mask %s has unmanaged volumes %s",
                        oldMask.getMaskName(), Joiner.on(',').join(existingVols.keySet()));
                _log.error(error);
                ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
                taskCompleter.error(_dbClient, serviceError);
                return;
            }
            InitiatorHelper initiatorHelper = new InitiatorHelper(
                    StringSetUtil.stringSetToUriList(oldMask.getInitiators())).process(exportGroup);
            List<String> initiatorNames = initiatorHelper.getPortNames();
            List<URI> volumes = StringSetUtil.stringSetToUriList(oldMask.getVolumes().keySet());
            ExportPathParams pathParams = _blockScheduler.calculateExportPathParamForVolumes(
                    volumes, 0, storageURI, exportGroupURI);
            pathParams.setStoragePorts(portGroup.getStoragePorts());
            List<Initiator> initiators = ExportUtils.getExportMaskInitiators(oldMask, _dbClient);
            List<URI> initURIs = new ArrayList<URI>();
            for (Initiator init : initiators) {
                if (!NullColumnValueGetter.isNullURI(init.getHost())) {
                    hostURIs.add(init.getHost());
                }
                initURIs.add(init.getId());
            }
            // Get the impacted export groups.
            List<ExportGroup> impactedExportGroups = ExportMaskUtils.getExportGroups(_dbClient, oldMask);
            List<URI> exportGroupURIs = URIUtil.toUris(impactedExportGroups);
            _log.info("changePortGroup: exportMask {}, impacted export groups: {}",
                    oldMask.getMaskName(), Joiner.on(',').join(exportGroupURIs));
            Map<URI, List<URI>> assignments = _blockScheduler.assignStoragePorts(storage, exportGroup,
                    initiators, null, pathParams, volumes, _networkDeviceController,
                    exportGroup.getVirtualArray(), token);
            // Try to find an existing export mask or masking view for the same host that uses the new
            // port group. If one is found, add the volumes in the current export mask to it; otherwise,
            // create a new export mask/masking view with the same storage group and initiator group but
            // the new port group, then delete the current export mask.
            ExportMask newMask = device.findExportMasksForPortGroupChange(storage, initiatorNames, portGroupURI);
            Map<URI, Integer> volumesToAdd = StringMapUtil.stringMapToVolumeMap(oldMask.getVolumes());
            if (newMask != null) {
                updateZoningMap(exportGroup, newMask, true);
                _log.info(String.format("adding these volumes %s to mask %s",
                        Joiner.on(",").join(volumesToAdd.keySet()), newMask.getMaskName()));
                previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup,
                        Arrays.asList(newMask), new ArrayList<URI>(volumesToAdd.keySet()));
                String addVolumeStep = workflow.createStepId();
                ExportTaskCompleter exportTaskCompleter = new ExportMaskAddVolumeCompleter(
                        exportGroupURI, newMask.getId(), volumesToAdd, addVolumeStep);
                exportTaskCompleter.setExportGroups(exportGroupURIs);
                Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportGroupAddVolumes",
                        storageURI, exportGroupURI, newMask.getId(), volumesToAdd, null, exportTaskCompleter);
                Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupAddVolumes",
                        storageURI, exportGroupURI, exportGroupURIs, newMask.getId(), volumesToAdd, initURIs,
                        addVolumeStep);
                previousStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK,
                        String.format("Adding volumes to mask %s (%s)", newMask.getMaskName(),
                                newMask.getId().toString()),
                        previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class,
                        maskingExecuteMethod, maskingRollbackMethod, addVolumeStep);
                previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage,
                        exportGroup, newMask, volumesToAdd, null);
            } else {
                // No existing export mask/masking view was found, so create a new one.
                // First, construct the new export mask name: if the export mask still has its original
                // name, append the new port group name to it; if the current port group name has already
                // been appended, remove it first, then append the new one.
                String oldName = oldMask.getMaskName();
                URI oldPGURI = oldMask.getPortGroup();
                if (oldPGURI != null) {
                    StoragePortGroup oldPG = _dbClient.queryObject(StoragePortGroup.class, oldPGURI);
                    if (oldPG != null) {
                        String pgName = oldPG.getLabel();
                        if (oldName.endsWith(pgName)) {
                            oldName = oldName.replaceAll(pgName, "");
                        }
                    }
                }
                String maskName = null;
                if (oldName.endsWith("_")) {
                    maskName = String.format("%s%s", oldName, portGroup.getLabel());
                } else {
                    maskName = String.format("%s_%s", oldName, portGroup.getLabel());
                }
                newMask = ExportMaskUtils.initializeExportMask(storage, exportGroup, initiators, volumesToAdd,
                        getStoragePortsInPaths(assignments), assignments, maskName, _dbClient);
                newMask.setPortGroup(portGroupURI);
                List<BlockObject> vols = new ArrayList<BlockObject>();
                for (URI boURI : volumesToAdd.keySet()) {
                    BlockObject bo = BlockObject.fetch(_dbClient, boURI);
                    vols.add(bo);
                }
                newMask.addToUserCreatedVolumes(vols);
                _dbClient.updateObject(newMask);
                _log.info(String.format("Creating new exportMask %s", maskName));
                // Make a new TaskCompleter for the export step. It has only one subtask.
                // This is due to existing requirements in the doExportGroupCreate completion logic.
                String maskingStep = workflow.createStepId();
                ExportTaskCompleter exportTaskCompleter = new ExportMaskChangePortGroupAddMaskCompleter(
                        newMask.getId(), exportGroupURI, maskingStep);
                exportTaskCompleter.setExportGroups(exportGroupURIs);
                Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportChangePortGroupAddPaths",
                        storageURI, exportGroupURI, newMask.getId(), oldMask.getId(), portGroupURI,
                        exportTaskCompleter);
                Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupCreate",
                        storageURI, exportGroupURI, newMask.getId(), maskingStep);
                maskingStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK,
                        String.format("Create export mask(%s) to use port group %s",
                                newMask.getMaskName(), portGroup.getNativeGuid()),
                        previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class,
                        maskingExecuteMethod, maskingRollbackMethod, maskingStep);
                String zoningStep = workflow.createStepId();
                List<URI> masks = new ArrayList<URI>();
                masks.add(newMask.getId());
                previousStep = generateZoningCreateWorkflow(workflow, maskingStep, exportGroup, masks,
                        volumesToAdd, zoningStep);
            }
        }
        previousStep = _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
        if (waitForApproval) {
            // Insert a step that will be suspended. When it resumes, it will re-acquire the lock keys,
            // which are released when the workflow suspends.
            List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient,
                    ExportGroup.ExportGroupType.valueOf(exportGroup.getType()),
                    StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), storageURI);
            String suspendMessage = "Adjust/rescan host/cluster paths. Press \"Resume\" to start removal of "
                    + "unnecessary paths. \"Rollback\" will terminate the order and roll back.";
            Workflow.Method method = WorkflowService.acquireWorkflowLocksMethod(lockKeys,
                    LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
            Workflow.Method rollbackNull = Workflow.NULL_METHOD;
            previousStep = workflow.createStep("AcquireLocks",
                    "Suspending for user verification of host/cluster connectivity.", previousStep,
                    storage.getId(), storage.getSystemType(), WorkflowService.class, method, rollbackNull,
                    waitForApproval, null);
            workflow.setSuspendedStepMessage(previousStep, suspendMessage);
        }
        for (ExportMask exportMask : exportMasks) {
            previousStep = generateChangePortGroupDeleteMaskWorkflowstep(storageURI, exportGroup, exportMask,
                    previousStep, workflow);
        }
        _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The change port group workflow has {} steps. Starting the workflow.",
                    workflow.getAllStepStatus().size());
            // Update the ExportChangePortGroupCompleter with the affected export groups.
            Set<URI> affectedExportGroups = new HashSet<URI>();
            for (ExportMask mask : exportMasks) {
                List<ExportGroup> assocExportGroups = ExportMaskUtils.getExportGroups(_dbClient, mask);
                for (ExportGroup eg : assocExportGroups) {
                    affectedExportGroups.add(eg.getId());
                }
            }
            taskCompleter.setAffectedExportGroups(affectedExportGroups);
            workflow.executePlan(taskCompleter, "Changed port group successfully.");
            _workflowService.markWorkflowBeenCreated(token, workflowKey);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception e) {
        _log.error("Export change port group Orchestration failed.", e);
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
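Both branches above add masking steps by pairing an execute Workflow.Method with a rollback Workflow.Method and chaining on the previous step's ID. The sketch below isolates that pattern, reusing the Workflow.createStep and Workflow.Method signatures exactly as they appear in this snippet. The step group, description, and the method names "doSomething"/"rollbackSomething" are placeholders; in the real code the dispatch class is a registered workflow controller such as MaskingWorkflowEntryPoints, so StepSketch.class here would not actually be dispatchable.

import java.net.URI;

import com.emc.storageos.workflow.Workflow;

// Hypothetical illustration of the execute/rollback step pattern used above.
// "doSomething" and "rollbackSomething" stand in for real entry-point methods
// on the dispatch class; they are not actual CoprHD method names.
public final class StepSketch {
    private StepSketch() {
    }

    public static String addStep(Workflow workflow, String previousStep,
            URI deviceURI, String systemType, URI exportGroupURI) {
        String stepId = workflow.createStepId();
        Workflow.Method executeMethod = new Workflow.Method("doSomething", deviceURI, exportGroupURI);
        Workflow.Method rollbackMethod = new Workflow.Method("rollbackSomething", deviceURI, exportGroupURI, stepId);
        // The returned step ID becomes the "previousStep" argument of the next
        // step, which is how the examples above chain their steps sequentially.
        return workflow.createStep("SketchStepGroup",
                "Illustrative step description",
                previousStep, deviceURI, systemType,
                StepSketch.class, executeMethod, rollbackMethod, stepId);
    }
}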
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class VmaxMaskingOrchestrator, method exportGroupAddInitiators.
@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token)
        throws Exception {
    BlockStorageDevice device = getDevice();
    String previousStep = null;
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
    ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
    logExportGroup(exportGroup, storageURI);
    // Set up the workflow steps.
    Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
            "exportGroupAddInitiators", true, token);
    Map<URI, List<URI>> zoneMasksToInitiatorsURIs = new HashMap<URI, List<URI>>();
    Map<URI, Map<URI, Integer>> zoneNewMasksToVolumeMap = new HashMap<URI, Map<URI, Integer>>();
    Map<URI, ExportMask> refreshedMasks = new HashMap<URI, ExportMask>();
    // Populate a map of volumes on the storage device.
    List<BlockObject> blockObjects = new ArrayList<BlockObject>();
    Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
    if (exportGroup != null && exportGroup.getVolumes() != null) {
        for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
            URI boURI = URI.create(entry.getKey());
            Integer hlu = Integer.valueOf(entry.getValue());
            BlockObject bo = BlockObject.fetch(_dbClient, boURI);
            if (bo.getStorageController().equals(storageURI)) {
                volumeMap.put(boURI, hlu);
                blockObjects.add(bo);
            }
        }
    }
    InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
    boolean anyOperationsToDo = false;
    Set<URI> partialMasks = new HashSet<>();
    Map<String, Set<URI>> initiatorToExportMaskPlacementMap = determineInitiatorToExportMaskPlacements(
            exportGroup, storageURI, initiatorHelper.getResourceToInitiators(),
            device.findExportMasks(storage, initiatorHelper.getPortNames(), false),
            initiatorHelper.getPortNameToInitiatorURI(), partialMasks);
    if (!initiatorToExportMaskPlacementMap.isEmpty()) {
        Map<URI, ExportMaskPolicy> policyCache = new HashMap<>();
        // The logic contained here is trying to place the initiators that were passed down in the
        // request. If we are in this path where the initiatorToExportMaskPlacementMap is not empty,
        // there are several cases why we got here:
        //
        // 1). An ExportMask has been found that is associated with the ExportGroup and it
        // is supposed to be the container for the compute resources that we are attempting
        // to add initiators for.
        // 2). An ExportMask has been found that is on the array. It may not be associated with the
        // ExportGroup, but it is supposed to be the container for the compute resources that
        // we are attempting to add initiators for.
        // 3). An ExportMask has been found that is on the array. It may not be associated with the
        // ExportGroup, but it has the initiators that we are trying to add.
        // 4). One of the above possibilities, plus an initiator that cannot be placed. The use case
        // here would be someone adding a new initiator for an existing host and a new host to a
        // cluster export.
        List<URI> initiatorsToPlace = new ArrayList<URI>();
        initiatorsToPlace.addAll(initiatorURIs);
        // This loop will determine a list of volumes to update per export mask.
        Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
        Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
        for (Map.Entry<String, Set<URI>> entry : initiatorToExportMaskPlacementMap.entrySet()) {
            URI initiatorURI = initiatorHelper.getPortNameToInitiatorURI().get(entry.getKey());
            if (initiatorURI == null || exportGroup == null) {
                // This initiator does not exist or it is not one of the initiators passed to the function.
                continue;
            }
            Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
            // Get a list of the ExportMasks that were matched to the initiator.
            List<URI> exportMaskURIs = new ArrayList<URI>();
            exportMaskURIs.addAll(entry.getValue());
            List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
            _log.info(String.format("Trying to place initiator %s", entry.getKey()));
            for (ExportMask mask : masks) {
                // Check for NO_VIPR. If found, avoid this mask.
                if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
                    _log.info(String.format(
                            "ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it",
                            mask.getMaskName(), ExportUtils.NO_VIPR));
                    continue;
                }
                _log.info(String.format("Trying to place initiator %s in mask %s", entry.getKey(),
                        mask.getMaskName()));
                if (mask.getInactive() && !mask.getStorageDevice().equals(storageURI)) {
                    continue;
                }
                // Refresh the mask if it has not already been refreshed by
                // determineInitiatorToExportMaskPlacements or findExportMasks.
                if (!refreshedMasks.containsKey(mask.getId())) {
                    mask = device.refreshExportMask(storage, mask);
                    refreshedMasks.put(mask.getId(), mask);
                }
                ExportMaskPolicy policy = getExportMaskPolicy(policyCache, device, storage, mask);
                // The mask may have the initiator but not all of the volumes yet.
                // The logic below will add the volumes that are necessary.
                if (mask.hasInitiator(initiatorURI.toString()) && CollectionUtils.isEmpty(
                        ExportUtils.getExportMasksSharingInitiator(_dbClient, initiatorURI, mask, exportMaskURIs))) {
                    _log.info(String.format("mask %s has initiator %s", mask.getMaskName(),
                            initiator.getInitiatorPort()));
                    // Add any volumes that are not already in the mask to the placement list.
                    for (BlockObject blockObject : blockObjects) {
                        // Check both the existing (unmanaged) volumes by WWN and the managed volumes by URI.
                        if (!mask.hasExistingVolume(blockObject.getWWN()) && !mask.hasVolume(blockObject.getId())) {
                            String volumePolicyName = ControllerUtils.getAutoTieringPolicyName(
                                    blockObject.getId(), _dbClient);
                            if (((volumePolicyName == null || volumePolicyName.equalsIgnoreCase(Constants.NONE.toString()))
                                    && (policy.tierPolicies == null || policy.tierPolicies.isEmpty()))
                                    || ((volumePolicyName != null && policy.tierPolicies != null
                                            && policy.tierPolicies.size() == 1
                                            && policy.tierPolicies.contains(volumePolicyName)))) {
                                _log.info(String.format("mask doesn't have volume %s yet, need to add it",
                                        blockObject.getId()));
                                Map<URI, Integer> newVolumesMap = existingMasksToUpdateWithNewVolumes.get(mask.getId());
                                if (newVolumesMap == null) {
                                    newVolumesMap = new HashMap<URI, Integer>();
                                    existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumesMap);
                                }
                                newVolumesMap.put(blockObject.getId(), volumeMap.get(blockObject.getId()));
                            }
                        } else {
                            _log.info(String.format("not adding volume %s to mask %s", blockObject.getId(),
                                    mask.getMaskName()));
                        }
                    }
                    // The initiator has been placed - it is in an already existing export
                    // mask, in which case we may just have to add volumes to it.
                    initiatorsToPlace.remove(initiatorURI);
                } else {
                    Set<URI> existingInitiatorIds = ExportMaskUtils.getAllInitiatorsForExportMask(_dbClient, mask);
                    if (existingInitiatorIds.isEmpty()) {
                        _log.info(String.format(
                                "not adding initiator %s to mask %s because there are no initiators associated with this mask",
                                initiatorURI, mask.getMaskName()));
                    }
                    // This mask does not contain the initiator; check whether it belongs to the same
                    // compute resource.
                    for (URI existingInitiatorId : existingInitiatorIds) {
                        Initiator existingInitiator = _dbClient.queryObject(Initiator.class, existingInitiatorId);
                        if (existingInitiator == null) {
                            _log.warn(String.format(
                                    "Initiator %s was found to be associated with ExportMask %s, but no longer exists in the DB",
                                    existingInitiatorId, mask.getId()));
                            continue;
                        }
                        if ((existingInitiator.getHost() != null && existingInitiator.getHost().equals(initiator.getHost()))
                                || (existingInitiator.getClusterName() != null
                                        && existingInitiator.getClusterName().equals(initiator.getClusterName()))) {
                            // Place the initiator in this ExportMask.
                            if (exportGroup.forCluster() && !policy.isCascadedIG()
                                    && ((existingInitiator.getHost() == null
                                            || !existingInitiator.getHost().equals(initiator.getHost())))) {
                                _log.info(String.format(
                                        "not adding initiator %s to mask %s because it is likely part of another mask in the cluster",
                                        initiatorURI, mask.getMaskName()));
                                continue;
                            }
                            Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
                            if (existingMaskInitiators == null) {
                                existingMaskInitiators = new HashSet<Initiator>();
                                existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
                            }
                            _log.info(String.format(
                                    "adding initiator %s to mask %s because it was found to be in the same compute resource",
                                    initiatorURI, mask.getMaskName()));
                            existingMaskInitiators.add(initiator);
                            // The initiator has been placed - it is not in the export yet,
                            // so we will have to add it to the mask.
                            initiatorsToPlace.remove(initiatorURI);
                        } else {
                            _log.info(String.format(
                                    "not adding initiator %s to mask %s because it doesn't belong to the same compute resource",
                                    existingInitiator.getId(), mask.getMaskName()));
                        }
                    }
                }
                updateZoningMap(exportGroup, mask, true);
            }
        }
        // Any initiators that could not be placed will need new export masks,
        // so add them to the main tracker.
        if (!initiatorsToPlace.isEmpty()) {
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(
                    exportGroup, initiatorsToPlace);
            for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                _log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(
                        workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
                previousStep = result.getStepId();
                zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
                anyOperationsToDo = true;
            }
        }
        _log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d",
                existingMasksToUpdateWithNewVolumes.size()));
        // At this point we have a mapping of all the masks that we need to update with new volumes.
        for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            Map<URI, Integer> volumesToAdd = entry.getValue();
            _log.info(String.format("adding these volumes %s to mask %s",
                    Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
            List<URI> volumeURIs = new ArrayList<URI>();
            volumeURIs.addAll(volumesToAdd.keySet());
            List<ExportMask> masks = new ArrayList<ExportMask>();
            masks.add(mask);
            previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, masks, volumeURIs);
            previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup,
                    mask, volumesToAdd, null);
            anyOperationsToDo = true;
        }
        // At this point we have a mapping of all the masks that we need to update with new initiators.
        for (Map.Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            Set<Initiator> initiatorsToAdd = entry.getValue();
            List<URI> initiatorsURIs = new ArrayList<URI>();
            for (Initiator initiator : initiatorsToAdd) {
                initiatorsURIs.add(initiator.getId());
            }
            _log.info(String.format("adding these initiators %s to mask %s",
                    Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
            Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
            maskToInitiatorsMap.put(mask.getId(), initiatorsURIs);
            previousStep = generateExportMaskAddInitiatorsWorkflow(workflow, previousStep, storage, exportGroup,
                    mask, initiatorsURIs, null, token);
            previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup,
                    maskToInitiatorsMap);
            anyOperationsToDo = true;
        }
    } else {
        _log.info("There are no masks for this export. Need to create anew.");
        // Create new export masks per compute resource, along with the zones
        // required (which might be on multiple NetworkSystems).
        for (Map.Entry<String, List<URI>> resourceEntry : initiatorHelper.getResourceToInitiators().entrySet()) {
            String computeKey = resourceEntry.getKey();
            List<URI> computeInitiatorURIs = resourceEntry.getValue();
            _log.info(String.format("New export masks for %s", computeKey));
            GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(
                    workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
            zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
            previousStep = result.getStepId();
            anyOperationsToDo = true;
        }
    }
    if (anyOperationsToDo) {
        if (!zoneNewMasksToVolumeMap.isEmpty()) {
            List<URI> exportMaskList = new ArrayList<URI>();
            exportMaskList.addAll(zoneNewMasksToVolumeMap.keySet());
            Map<URI, Integer> overallVolumeMap = new HashMap<URI, Integer>();
            for (Map<URI, Integer> oneVolumeMap : zoneNewMasksToVolumeMap.values()) {
                overallVolumeMap.putAll(oneVolumeMap);
            }
            previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, exportMaskList,
                    overallVolumeMap);
        }
        if (!zoneMasksToInitiatorsURIs.isEmpty()) {
            previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup,
                    zoneMasksToInitiatorsURIs);
        }
        String successMessage = String.format("Successfully exported to initiators on StorageArray %s",
                storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
    } else {
        taskCompleter.ready(_dbClient);
    }
}
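The volume-map population at the top of this method converts the ExportGroup's String-to-String volume map into typed URIs and HLUs before placement begins. The sketch below isolates that conversion; the class and method names are illustrative, and it deliberately omits the per-array filter (bo.getStorageController().equals(storageURI)) from the snippet, since that check needs a fetched BlockObject and a DB client.

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

// Hypothetical helper mirroring the volume-map population above: turn the
// ExportGroup's String->String volume map into a URI->HLU map.
public final class VolumeMapSketch {
    private VolumeMapSketch() {
    }

    public static Map<URI, Integer> toVolumeMap(Map<String, String> exportGroupVolumes) {
        Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
        for (Map.Entry<String, String> entry : exportGroupVolumes.entrySet()) {
            // Keys are block object URIs, values are HLUs, both stored as strings.
            URI volumeURI = URI.create(entry.getKey());
            Integer hlu = Integer.valueOf(entry.getValue());
            volumeMap.put(volumeURI, hlu);
        }
        return volumeMap;
    }
}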