Use of com.emc.storageos.db.client.model.StoragePortGroup in project coprhd-controller by CoprHD.
The class VmaxExportOperations, method refreshExportMask.
@Override
public ExportMask refreshExportMask(StorageSystem storage, ExportMask mask) throws DeviceControllerException {
long startTime = System.currentTimeMillis();
try {
CIMInstance instance = _helper.getSymmLunMaskingView(storage, mask);
if (instance != null) {
StringBuilder builder = new StringBuilder();
WBEMClient client = _helper.getConnection(storage).getCimClient();
String name = CIMPropertyFactory.getPropertyValue(instance, SmisConstants.CP_ELEMENT_NAME);
// Get volumes and initiators for the masking instance
Map<String, Integer> discoveredVolumes = _helper.getVolumesFromLunMaskingInstance(client, instance);
List<String> discoveredPorts = _helper.getInitiatorsFromLunMaskingInstance(client, instance);
Set existingInitiators = (mask.getExistingInitiators() != null) ? mask.getExistingInitiators() : Collections.emptySet();
Set existingVolumes = (mask.getExistingVolumes() != null) ? mask.getExistingVolumes().keySet() : Collections.emptySet();
builder.append(String.format("%nXM existing objects: %s I{%s} V:{%s}%n", name, Joiner.on(',').join(existingInitiators), Joiner.on(',').join(existingVolumes)));
builder.append(String.format("XM discovered: %s I:{%s} V:{%s}%n", name, Joiner.on(',').join(discoveredPorts), Joiner.on(',').join(discoveredVolumes.keySet())));
List<String> initiatorsToAddToExisting = new ArrayList<String>();
List<Initiator> initiatorsToAddToUserAddedAndInitiatorList = new ArrayList<Initiator>();
/**
 * For the newly discovered initiators: if they are ViPR-discovered ports and belong to the same
 * compute resource, add them to the userAdded and initiator lists; otherwise add them to the existing list.
 */
for (String port : discoveredPorts) {
String normalizedPort = Initiator.normalizePort(port);
if (!mask.hasExistingInitiator(normalizedPort) && !mask.hasUserInitiator(normalizedPort)) {
Initiator existingInitiator = ExportUtils.getInitiator(Initiator.toPortNetworkId(port), _dbClient);
// Don't add additional initiator to initiators list if it belongs to different host/cluster
if (existingInitiator != null && !ExportMaskUtils.checkIfDifferentResource(mask, existingInitiator)) {
_log.info("Initiator {}->{} belonging to same compute, adding to userAdded and initiator list.", normalizedPort, existingInitiator.getId());
initiatorsToAddToUserAddedAndInitiatorList.add(existingInitiator);
} else {
initiatorsToAddToExisting.add(normalizedPort);
}
}
}
/**
 * Remove from the mask's existing-initiator list any ports that were not discovered on the array,
 * since they are stale.
 *
 * If an existing initiator was discovered and belongs to the same compute resource, it has to be
 * moved from the existing list to the userAdded and initiator lists.
 */
List<String> initiatorsToRemoveFromExistingList = new ArrayList<String>();
if (mask.getExistingInitiators() != null && !mask.getExistingInitiators().isEmpty()) {
for (String existingInitiatorStr : mask.getExistingInitiators()) {
if (!discoveredPorts.contains(existingInitiatorStr)) {
initiatorsToRemoveFromExistingList.add(existingInitiatorStr);
} else {
Initiator existingInitiator = ExportUtils.getInitiator(Initiator.toPortNetworkId(existingInitiatorStr), _dbClient);
if (existingInitiator != null && !ExportMaskUtils.checkIfDifferentResource(mask, existingInitiator)) {
_log.info("Initiator {}->{} belonging to same compute, removing from existing," + " and adding to userAdded and initiator list", existingInitiatorStr, existingInitiator.getId());
initiatorsToAddToUserAddedAndInitiatorList.add(existingInitiator);
initiatorsToRemoveFromExistingList.add(existingInitiatorStr);
}
}
}
}
/**
 * Start from all initiators in the mask and remove every port that ViPR has discovered.
 * The remainder has to be removed from the userAdded and initiator lists, because those ports are
 * not known to ViPR and have to be moved to the existing list.
 */
List<URI> initiatorsToRemoveFromUserAddedAndInitiatorList = new ArrayList<URI>();
if (mask.getInitiators() != null && !mask.getInitiators().isEmpty()) {
initiatorsToRemoveFromUserAddedAndInitiatorList.addAll(transform(mask.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI));
for (String port : discoveredPorts) {
String normalizedPort = Initiator.normalizePort(port);
Initiator initiatorDiscoveredInViPR = ExportUtils.getInitiator(Initiator.toPortNetworkId(port), _dbClient);
if (initiatorDiscoveredInViPR != null) {
initiatorsToRemoveFromUserAddedAndInitiatorList.remove(initiatorDiscoveredInViPR.getId());
} else if (!mask.hasExistingInitiator(normalizedPort)) {
_log.info("Initiator {} not found in database, removing from user Added and initiator list," + " and adding to existing list.", port);
initiatorsToAddToExisting.add(normalizedPort);
}
}
}
boolean removeInitiators = !initiatorsToRemoveFromExistingList.isEmpty() || !initiatorsToRemoveFromUserAddedAndInitiatorList.isEmpty();
boolean addInitiators = !initiatorsToAddToUserAddedAndInitiatorList.isEmpty() || !initiatorsToAddToExisting.isEmpty();
// Check the volumes and update the lists as necessary
Map<String, Integer> volumesToAdd = ExportMaskUtils.diffAndFindNewVolumes(mask, discoveredVolumes);
boolean addVolumes = !volumesToAdd.isEmpty();
boolean removeVolumes = false;
List<String> volumesToRemove = new ArrayList<String>();
// if the volume is in export mask's user added volumes and also in the existing volumes, remove from existing volumes
for (String wwn : discoveredVolumes.keySet()) {
if (mask.hasExistingVolume(wwn)) {
URIQueryResultList volumeList = new URIQueryResultList();
_dbClient.queryByConstraint(AlternateIdConstraint.Factory.getVolumeWwnConstraint(wwn), volumeList);
if (volumeList.iterator().hasNext()) {
URI volumeURI = volumeList.iterator().next();
if (mask.hasUserCreatedVolume(volumeURI)) {
builder.append(String.format("\texisting volumes contain wwn %s, but it is also in the " + "export mask's user added volumes, so removing from existing volumes", wwn));
volumesToRemove.add(wwn);
}
}
}
}
if (mask.getExistingVolumes() != null && !mask.getExistingVolumes().isEmpty()) {
volumesToRemove.addAll(mask.getExistingVolumes().keySet());
volumesToRemove.removeAll(discoveredVolumes.keySet());
removeVolumes = !volumesToRemove.isEmpty();
}
// Update user added volume's HLU information in ExportMask and ExportGroup
ExportMaskUtils.updateHLUsInExportMask(mask, discoveredVolumes, _dbClient);
// Grab the storage ports that have been allocated for this
// existing mask and update them.
List<String> storagePorts = _helper.getStoragePortsFromLunMaskingInstance(client, instance);
List<String> storagePortURIs = ExportUtils.storagePortNamesToURIs(_dbClient, storagePorts);
// Check the storagePorts and update the lists as necessary
boolean addStoragePorts = false;
List<String> storagePortsToAdd = new ArrayList<>();
if (mask.getStoragePorts() == null) {
mask.setStoragePorts(new ArrayList<String>());
}
for (String portID : storagePortURIs) {
if (!mask.getStoragePorts().contains(portID)) {
storagePortsToAdd.add(portID);
addStoragePorts = true;
}
}
boolean removeStoragePorts = false;
List<String> storagePortsToRemove = new ArrayList<String>();
if (mask.getStoragePorts() != null && !mask.getStoragePorts().isEmpty()) {
storagePortsToRemove.addAll(mask.getStoragePorts());
storagePortsToRemove.removeAll(storagePortURIs);
removeStoragePorts = !storagePortsToRemove.isEmpty();
}
builder.append(String.format("XM refresh: %s existing initiators; add:{%s} remove:{%s}%n", name, Joiner.on(',').join(initiatorsToAddToExisting), Joiner.on(',').join(initiatorsToRemoveFromExistingList)));
builder.append(String.format("XM refresh: %s user added and initiator list; add:{%s} remove:{%s}%n", name, Joiner.on(',').join(initiatorsToAddToUserAddedAndInitiatorList), Joiner.on(',').join(initiatorsToRemoveFromUserAddedAndInitiatorList)));
builder.append(String.format("XM refresh: %s volumes; add:{%s} remove:{%s}%n", name, Joiner.on(',').join(volumesToAdd.keySet()), Joiner.on(',').join(volumesToRemove)));
builder.append(String.format("XM refresh: %s ports; add:{%s} remove:{%s}%n", name, Joiner.on(',').join(storagePortsToAdd), Joiner.on(',').join(storagePortsToRemove)));
// Any changes indicated, then update the mask and persist it
if (addInitiators || removeInitiators || addVolumes || removeVolumes || addStoragePorts || removeStoragePorts) {
mask.removeFromExistingInitiators(initiatorsToRemoveFromExistingList);
if (!initiatorsToRemoveFromUserAddedAndInitiatorList.isEmpty()) {
mask.removeInitiatorURIs(initiatorsToRemoveFromUserAddedAndInitiatorList);
mask.removeFromUserAddedInitiatorsByURI(initiatorsToRemoveFromUserAddedAndInitiatorList);
}
// https://coprhd.atlassian.net/browse/COP-17224 - For those cases where InitiatorGroups are shared by
// MaskingViews, if CoprHD processes one ExportMask by updating it with new initiators, then it could
// affect other ExportMasks. Consider that this refreshExportMask is against that other ExportMask.
// We shouldn't treat the initiators that we find as 'existing' (that is, created outside of CoprHD);
// instead we should consider them userAdded for this ExportMask as well.
List<Initiator> userAddedInitiators = ExportMaskUtils.findIfInitiatorsAreUserAddedInAnotherMask(mask, initiatorsToAddToUserAddedAndInitiatorList, _dbClient);
mask.addToUserCreatedInitiators(userAddedInitiators);
builder.append(String.format("XM refresh: %s user added initiators; add:{%s} remove:{%s}%n", name, Joiner.on(',').join(userAddedInitiators), Joiner.on(',').join(initiatorsToRemoveFromUserAddedAndInitiatorList)));
mask.addInitiators(initiatorsToAddToUserAddedAndInitiatorList);
mask.addToUserCreatedInitiators(initiatorsToAddToUserAddedAndInitiatorList);
mask.addToExistingInitiatorsIfAbsent(initiatorsToAddToExisting);
mask.removeFromExistingInitiators(initiatorsToRemoveFromExistingList);
mask.removeFromExistingVolumes(volumesToRemove);
mask.addToExistingVolumesIfAbsent(volumesToAdd);
mask.getStoragePorts().addAll(storagePortsToAdd);
mask.getStoragePorts().removeAll(storagePortsToRemove);
URI pgURI = mask.getPortGroup();
if (!NullColumnValueGetter.isNullURI(pgURI) && (!storagePortsToAdd.isEmpty() || !storagePortsToRemove.isEmpty())) {
StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, pgURI);
portGroup.getStoragePorts().addAll(storagePortsToAdd);
portGroup.getStoragePorts().removeAll(storagePortsToRemove);
_dbClient.updateObject(portGroup);
}
ExportMaskUtils.sanitizeExportMaskContainers(_dbClient, mask);
builder.append("XM refresh: There are changes to mask, " + "updating it...\n");
_dbClient.updateObject(mask);
} else {
builder.append("XM refresh: There are no changes to the mask\n");
}
_networkDeviceController.refreshZoningMap(mask, transform(initiatorsToRemoveFromUserAddedAndInitiatorList, CommonTransformerFunctions.FCTN_URI_TO_STRING), Collections.EMPTY_LIST, (addInitiators || removeInitiators), true);
_log.info(builder.toString());
}
} catch (Exception e) {
boolean throwException = true;
if (e instanceof WBEMException) {
WBEMException we = (WBEMException) e;
// Only throw exception if code is not CIM_ERROR_NOT_FOUND
throwException = (we.getID() != WBEMException.CIM_ERR_NOT_FOUND);
}
if (throwException) {
String msg = "Error when attempting to query LUN masking information: " + e.getMessage();
_log.error(MessageFormat.format("Encountered an SMIS error when attempting to refresh existing exports: {0}", msg), e);
throw SmisException.exceptions.refreshExistingMaskFailure(msg, e);
}
} finally {
long totalTime = System.currentTimeMillis() - startTime;
_log.info(String.format("refreshExportMask took %f seconds", (double) totalTime / (double) 1000));
}
return mask;
}
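refreshExportMask is the read-side reconciliation step: callers re-read the mask, refresh it against the array, and only then trust its initiator and volume lists (the changePortGroup example further down does exactly this). Below is a minimal caller sketch, assuming an exportMaskURI and an SMI-S-backed device handle; the identifiers are illustrative, not project code.

    // Hypothetical caller sketch -- "device" and "exportMaskURI" are assumptions for illustration.
    ExportMask mask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
    StorageSystem storage = _dbClient.queryObject(StorageSystem.class, mask.getStorageDevice());
    // Re-sync the ViPR ExportMask with the masking view on the VMAX before using its lists.
    mask = device.refreshExportMask(storage, mask);
    if (mask.getExistingInitiators() != null && !mask.getExistingInitiators().isEmpty()) {
        // The masking view still contains initiators that ViPR does not manage.
    }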
Use of com.emc.storageos.db.client.model.StoragePortGroup in project coprhd-controller by CoprHD.
The class BlockIngestExportOrchestrator, method updateExportMaskWithPortGroup.
/**
* Update the ingested exportMask with port group info. If the port group is not in the DB yet, create it.
*
* @param system - The storage system the export mask belongs to
* @param unmanagedMask - The corresponding unmanaged export mask
* @param mask - The ingested export mask
* @param exportGroup - The export group to be updated
* @param blockId - The block object Id that is being ingested.
*/
protected void updateExportMaskWithPortGroup(StorageSystem system, UnManagedExportMask unmanagedMask, ExportMask mask, ExportGroup exportGroup, URI blockId) {
boolean portGroupEnabled = false;
if (Type.vmax.name().equals(system.getSystemType())) {
portGroupEnabled = Boolean.valueOf(_customConfigHandler.getComputedCustomConfigValue(CustomConfigConstants.VMAX_USE_PORT_GROUP_ENABLED, system.getSystemType(), null));
}
// Set port group
String portGroupName = unmanagedMask.getPortGroup();
if (NullColumnValueGetter.isNotNullValue(portGroupName)) {
// Port group name is set in the UnManagedMask
String guid = NativeGUIDGenerator.generateNativeGuidForStoragePortGroup(system, portGroupName);
URIQueryResultList result = new URIQueryResultList();
_dbClient.queryByConstraint(AlternateIdConstraint.Factory.getPortGroupNativeGuidConstraint(guid), result);
Iterator<URI> it = result.iterator();
boolean foundPG = it.hasNext();
StoragePortGroup portGroup = null;
if (!foundPG) {
portGroup = new StoragePortGroup();
portGroup.setId(URIUtil.createId(StoragePortGroup.class));
portGroup.setLabel(portGroupName);
portGroup.setNativeGuid(guid);
portGroup.setStorageDevice(system.getId());
portGroup.setInactive(false);
_dbClient.createObject(portGroup);
} else {
URI pgURI = it.next();
portGroup = _dbClient.queryObject(StoragePortGroup.class, pgURI);
}
List<URI> targets = new ArrayList<URI>(Collections2.transform(unmanagedMask.getKnownStoragePortUris(), CommonTransformerFunctions.FCTN_STRING_TO_URI));
if (portGroup.getStoragePorts() != null && !portGroup.getStoragePorts().isEmpty()) {
portGroup.getStoragePorts().replace(StringSetUtil.uriListToStringSet(targets));
} else {
portGroup.setStoragePorts(StringSetUtil.uriListToStringSet(targets));
}
if (portGroupEnabled) {
portGroup.setRegistrationStatus(RegistrationStatus.REGISTERED.name());
portGroup.setMutable(false);
} else {
portGroup.setRegistrationStatus(RegistrationStatus.UNREGISTERED.name());
portGroup.setMutable(true);
}
_dbClient.updateObject(portGroup);
mask.setPortGroup(portGroup.getId());
_dbClient.updateObject(mask);
// Update export group pathParms if port group feature enabled.
if (portGroupEnabled && blockId != null) {
ExportPathParams pathParam = new ExportPathParams();
pathParam.setLabel(exportGroup.getLabel());
pathParam.setExplicitlyCreated(false);
pathParam.setId(URIUtil.createId(ExportPathParams.class));
pathParam.setPortGroup(portGroup.getId());
pathParam.setInactive(false);
_dbClient.createObject(pathParam);
exportGroup.addToPathParameters(blockId, pathParam.getId());
_dbClient.updateObject(exportGroup);
}
}
}
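The core of this method is a find-or-create lookup keyed on the port group's native GUID. Read in isolation, it could be distilled into the following hedged helper sketch (hypothetical method name; the lookups mirror the calls used above):

    // Hypothetical helper distilled from the method above; not part of the project source.
    private StoragePortGroup findOrCreatePortGroup(StorageSystem system, String portGroupName) {
        String guid = NativeGUIDGenerator.generateNativeGuidForStoragePortGroup(system, portGroupName);
        URIQueryResultList result = new URIQueryResultList();
        _dbClient.queryByConstraint(AlternateIdConstraint.Factory.getPortGroupNativeGuidConstraint(guid), result);
        Iterator<URI> it = result.iterator();
        if (it.hasNext()) {
            // Reuse the StoragePortGroup that was discovered or ingested earlier.
            return _dbClient.queryObject(StoragePortGroup.class, it.next());
        }
        StoragePortGroup portGroup = new StoragePortGroup();
        portGroup.setId(URIUtil.createId(StoragePortGroup.class));
        portGroup.setLabel(portGroupName);
        portGroup.setNativeGuid(guid);
        portGroup.setStorageDevice(system.getId());
        portGroup.setInactive(false);
        _dbClient.createObject(portGroup);
        return portGroup;
    }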
Use of com.emc.storageos.db.client.model.StoragePortGroup in project coprhd-controller by CoprHD.
The class VmaxMaskingOrchestrator, method changePortGroup.
@Override
public void changePortGroup(URI storageURI, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String token) {
ExportChangePortGroupCompleter taskCompleter = null;
try {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
taskCompleter = new ExportChangePortGroupCompleter(storageURI, exportGroupURI, token, portGroupURI);
logExportGroup(exportGroup, storageURI);
String workflowKey = "changePortGroup";
if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
return;
}
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), workflowKey, false, token);
if (CollectionUtils.isEmpty(exportMaskURIs)) {
_log.info("No export masks to change");
taskCompleter.ready(_dbClient);
return;
}
List<ExportMask> exportMasks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
String previousStep = null;
Set<URI> hostURIs = new HashSet<URI>();
SmisStorageDevice device = (SmisStorageDevice) getDevice();
for (ExportMask oldMask : exportMasks) {
oldMask = device.refreshExportMask(storage, oldMask);
StringSet existingInits = oldMask.getExistingInitiators();
StringMap existingVols = oldMask.getExistingVolumes();
if (!CollectionUtils.isEmpty(existingInits)) {
String error = String.format("The export mask %s has unmanaged initiators %s", oldMask.getMaskName(), Joiner.on(',').join(existingInits));
_log.error(error);
ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
taskCompleter.error(_dbClient, serviceError);
return;
}
if (!CollectionUtils.isEmpty(existingVols)) {
String error = String.format("The export mask %s has unmanaged volumes %s", oldMask.getMaskName(), Joiner.on(',').join(existingVols.keySet()));
_log.error(error);
ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
taskCompleter.error(_dbClient, serviceError);
return;
}
InitiatorHelper initiatorHelper = new InitiatorHelper(StringSetUtil.stringSetToUriList(oldMask.getInitiators())).process(exportGroup);
List<String> initiatorNames = initiatorHelper.getPortNames();
List<URI> volumes = StringSetUtil.stringSetToUriList(oldMask.getVolumes().keySet());
ExportPathParams pathParams = _blockScheduler.calculateExportPathParamForVolumes(volumes, 0, storageURI, exportGroupURI);
pathParams.setStoragePorts(portGroup.getStoragePorts());
List<Initiator> initiators = ExportUtils.getExportMaskInitiators(oldMask, _dbClient);
List<URI> initURIs = new ArrayList<URI>();
for (Initiator init : initiators) {
if (!NullColumnValueGetter.isNullURI(init.getHost())) {
hostURIs.add(init.getHost());
}
initURIs.add(init.getId());
}
// Get impacted export groups
List<ExportGroup> impactedExportGroups = ExportMaskUtils.getExportGroups(_dbClient, oldMask);
List<URI> exportGroupURIs = URIUtil.toUris(impactedExportGroups);
_log.info("changePortGroup: exportMask {}, impacted export groups: {}", oldMask.getMaskName(), Joiner.on(',').join(exportGroupURIs));
device.refreshPortGroup(portGroupURI);
// Try to find an existing export mask or masking view for the same host that already uses the new
// port group. If one is found, add the volumes from the current export mask to it; otherwise, create
// a new export mask/masking view with the same storage group, the same initiator group and the new
// port group. Then delete the current export mask.
ExportMask newMask = device.findExportMasksForPortGroupChange(storage, initiatorNames, portGroupURI);
Map<URI, Integer> volumesToAdd = StringMapUtil.stringMapToVolumeMap(oldMask.getVolumes());
if (newMask != null) {
updateZoningMap(exportGroup, newMask, true);
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), newMask.getMaskName()));
previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, Arrays.asList(newMask), new ArrayList<URI>(volumesToAdd.keySet()));
String addVolumeStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskAddVolumeCompleter(exportGroupURI, newMask.getId(), volumesToAdd, addVolumeStep);
exportTaskCompleter.setExportGroups(exportGroupURIs);
Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportGroupAddVolumes", storageURI, exportGroupURI, newMask.getId(), volumesToAdd, null, exportTaskCompleter);
Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupAddVolumes", storageURI, exportGroupURI, exportGroupURIs, newMask.getId(), volumesToAdd, initURIs, addVolumeStep);
previousStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Adding volumes to mask %s (%s)", newMask.getMaskName(), newMask.getId().toString()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, addVolumeStep);
previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, newMask, volumesToAdd, null);
} else {
// No existing export mask / masking view was found, so create a new one.
// First, construct the new export mask name: if the export mask still has its original name,
// append the new port group name to it; if the current port group name is already appended,
// remove it first and then append the new port group name.
Map<URI, List<URI>> assignments = _blockScheduler.assignStoragePorts(storage, exportGroup, initiators, null, pathParams, volumes, _networkDeviceController, exportGroup.getVirtualArray(), token);
String oldName = oldMask.getMaskName();
URI oldPGURI = oldMask.getPortGroup();
if (oldPGURI != null) {
StoragePortGroup oldPG = _dbClient.queryObject(StoragePortGroup.class, oldPGURI);
if (oldPG != null) {
String pgName = oldPG.getLabel();
if (oldName.endsWith(pgName)) {
oldName = oldName.replaceAll(pgName, "");
}
}
}
String maskName = null;
if (oldName.endsWith("_")) {
maskName = String.format("%s%s", oldName, portGroup.getLabel());
} else {
maskName = String.format("%s_%s", oldName, portGroup.getLabel());
}
newMask = ExportMaskUtils.initializeExportMask(storage, exportGroup, initiators, volumesToAdd, getStoragePortsInPaths(assignments), assignments, maskName, _dbClient);
newMask.setPortGroup(portGroupURI);
List<BlockObject> vols = new ArrayList<BlockObject>();
for (URI boURI : volumesToAdd.keySet()) {
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
vols.add(bo);
}
newMask.addToUserCreatedVolumes(vols);
_dbClient.updateObject(newMask);
_log.info(String.format("Creating new exportMask %s", maskName));
// Make a new TaskCompleter for the exportStep. It has only one subtask.
// This is due to existing requirements in the doExportGroupCreate completion
// logic.
String maskingStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskChangePortGroupAddMaskCompleter(newMask.getId(), exportGroupURI, maskingStep);
exportTaskCompleter.setExportGroups(exportGroupURIs);
Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportChangePortGroupAddPaths", storageURI, exportGroupURI, newMask.getId(), oldMask.getId(), portGroupURI, exportTaskCompleter);
Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupCreate", storageURI, exportGroupURI, newMask.getId(), maskingStep);
maskingStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Create export mask(%s) to use port group %s", newMask.getMaskName(), portGroup.getNativeGuid()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, maskingStep);
String zoningStep = workflow.createStepId();
List<URI> masks = new ArrayList<URI>();
masks.add(newMask.getId());
previousStep = generateZoningCreateWorkflow(workflow, maskingStep, exportGroup, masks, volumesToAdd, zoningStep);
}
}
previousStep = _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
if (waitForApproval) {
// Insert a step that will be suspended. When it resumes, it will re-acquire the lock keys,
// which are released when the workflow suspends.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), storageURI);
String suspendMessage = "Adjust/rescan host/cluster paths. Press \"Resume\" to start removal of unnecessary paths. " + "\"Rollback\" will terminate the order and roll back.";
Workflow.Method method = WorkflowService.acquireWorkflowLocksMethod(lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
Workflow.Method rollbackNull = Workflow.NULL_METHOD;
previousStep = workflow.createStep("AcquireLocks", "Suspending for user verification of host/cluster connectivity.", previousStep, storage.getId(), storage.getSystemType(), WorkflowService.class, method, rollbackNull, waitForApproval, null);
workflow.setSuspendedStepMessage(previousStep, suspendMessage);
}
for (ExportMask exportMask : exportMasks) {
previousStep = generateChangePortGroupDeleteMaskWorkflowstep(storageURI, exportGroup, exportMask, previousStep, workflow);
}
_wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
if (!workflow.getAllStepStatus().isEmpty()) {
_log.info("The change port group workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
// update ExportChangePortGroupCompleter with affected export groups
Set<URI> affectedExportGroups = new HashSet<URI>();
for (ExportMask mask : exportMasks) {
List<ExportGroup> assocExportGroups = ExportMaskUtils.getExportGroups(_dbClient, mask);
for (ExportGroup eg : assocExportGroups) {
affectedExportGroups.add(eg.getId());
}
}
taskCompleter.setAffectedExportGroups(affectedExportGroups);
workflow.executePlan(taskCompleter, "Change port group successfully.");
_workflowService.markWorkflowBeenCreated(token, workflowKey);
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception e) {
_log.error("Export change port group Orchestration failed.", e);
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
taskCompleter.error(_dbClient, serviceError);
}
}
}
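changePortGroup guards against duplicate execution with the workflowKey bookkeeping at its top and bottom: a workflow is only built once per task token. A minimal sketch of that guard, using the same _workflowService calls seen above (the step creation is elided):

    // Idempotency guard distilled from the method above; "..." marks elided step creation.
    String workflowKey = "changePortGroup";
    if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
        return;   // a workflow for this token/key was already built and dispatched
    }
    Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), workflowKey, false, token);
    // ... create masking, zoning and host-rescan steps ...
    workflow.executePlan(taskCompleter, "Change port group successfully.");
    _workflowService.markWorkflowBeenCreated(token, workflowKey);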
Use of com.emc.storageos.db.client.model.StoragePortGroup in project coprhd-controller by CoprHD.
The class ExportWorkflowUtils, method generateExportGroupChangePortWorkflow.
/**
* Generate a workflow step to change the port group of an export group
*
* @param workflow - Workflow
* @param wfGroupId - Workflow group Id
* @param exportGroupURI - Export group URI
* @param portGroupURI - New port group URI
* @param exportMaskURIs - The URI list of affected export masks in the export group
* @param waitForApproval - Whether to suspend the workflow for user approval before the old paths are removed
* @return - The generated step
* @throws ControllerException
*/
public String generateExportGroupChangePortWorkflow(Workflow workflow, String wfGroupId, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs, boolean waitForApproval) throws ControllerException {
Workflow.Method rollbackMethod = rollbackMethodNullMethod();
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
DiscoveredSystemObject system = _dbClient.queryObject(StorageSystem.class, portGroup.getStorageDevice());
Workflow.Method method = ExportWorkflowEntryPoints.exportChangePortGroupMethod(system.getId(), exportGroupURI, portGroupURI, exportMaskURIs, waitForApproval);
String stepDescription = String.format("Change port group to %s for the export group %s", portGroup.getNativeGuid(), exportGroup.getLabel());
return newWorkflowStep(workflow, wfGroupId, stepDescription, system, method, rollbackMethod, null);
}
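A hedged sketch of how a controller-layer caller might use this helper: build a workflow, add the change-port-group step, and execute the plan. The workflow name, group id, variable names and completer below are assumptions for illustration, not taken from the project:

    // Hypothetical caller -- "exportGroupChangePortGroup", "_wfUtils" and "completer" are assumptions.
    Workflow workflow = _workflowService.getNewWorkflow(this, "exportGroupChangePortGroup", true, taskId);
    _wfUtils.generateExportGroupChangePortWorkflow(workflow, "changePortGroup",
            exportGroupURI, newPortGroupURI, affectedExportMaskURIs, waitForApproval);
    workflow.executePlan(completer, "Requested port group change for the export group.");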
Use of com.emc.storageos.db.client.model.StoragePortGroup in project coprhd-controller by CoprHD.
The class StoragePortGroupDeleteCompleter, method complete.
@Override
protected void complete(DbClient dbClient, Status status, ServiceCoded coded) throws DeviceControllerException {
try {
StoragePortGroup portGroup = dbClient.queryObject(StoragePortGroup.class, getId());
if (status == Status.ready && portGroup != null) {
dbClient.ready(StoragePortGroup.class, getId(), getOpId());
dbClient.removeObject(portGroup);
} else if (status == Status.error) {
log.error("The status is error.");
dbClient.error(StoragePortGroup.class, getId(), getOpId(), coded);
}
} catch (Exception e) {
log.error("Failed updating status", e);
} finally {
updateWorkflowStatus(status, coded);
}
}
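Completers like this one are normally created up front and handed to the workflow step that drives the array operation, so that complete() runs when the step finishes and the StoragePortGroup is removed only on success. A hedged wiring sketch; the completer constructor arguments, the Workflow.Method name and the entry-point class are assumptions, not taken from the project source:

    // Hypothetical wiring sketch -- "deleteStoragePortGroup" and the completer constructor are assumptions.
    String stepId = workflow.createStepId();
    StoragePortGroupDeleteCompleter completer = new StoragePortGroupDeleteCompleter(portGroupURI, stepId);
    Workflow.Method deleteMethod = new Workflow.Method("deleteStoragePortGroup", systemURI, portGroupURI, completer);
    workflow.createStep("DeletePortGroup",
            String.format("Delete storage port group %s", portGroupURI),
            null, systemURI, system.getSystemType(),
            MaskingWorkflowEntryPoints.class, deleteMethod, null, stepId);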