Use of com.emc.storageos.volumecontroller.impl.utils.ExportOperationContext in project coprhd-controller by CoprHD.
The class VmaxExportOperations, method deleteExportMask.
@Override
public void deleteExportMask(StorageSystem storage, URI exportMaskURI, List<URI> volumeURIList, List<URI> targetURIList, List<Initiator> initiatorList, TaskCompleter taskCompleter) throws DeviceControllerException {
_log.info("{} deleteExportMask START...", storage.getSerialNumber());
try {
_log.info("Export mask id: {}", exportMaskURI);
if (volumeURIList != null) {
_log.info("deleteExportMask: volumes: {}", Joiner.on(',').join(volumeURIList));
}
if (targetURIList != null) {
_log.info("deleteExportMask: assignments: {}", Joiner.on(',').join(targetURIList));
}
if (initiatorList != null) {
_log.info("deleteExportMask: initiators: {}", Joiner.on(',').join(initiatorList));
}
boolean isVmax3 = storage.checkIfVmax3();
WBEMClient client = _helper.getConnection(storage).getCimClient();
String maskingViewName = _helper.getExportMaskName(exportMaskURI);
// Always get the Storage Group from the masking view, rather than depending on the name to find the SG.
String groupName = _helper.getStorageGroupForGivenMaskingView(maskingViewName, storage);
/*
* The idea is to remove orphaned child groups after deleting the masking view. We're getting
* the list of child groups here because once we call deleteMaskingView, the parent group
* will be deleted automatically.
*
* Run AssociatorNames to get the details of the child Storage Groups, and group them based on
* Fast Policy.
*/
Map<StorageGroupPolicyLimitsParam, List<String>> childGroupsByFast = new HashMap<StorageGroupPolicyLimitsParam, List<String>>();
// if SGs are already removed from masking view manually, then skip this part
if (null != groupName) {
childGroupsByFast = _helper.groupStorageGroupsByAssociation(storage, groupName);
} else {
_log.info("Masking View {} doesn't have any SGs associated, probably removed manually from Array", maskingViewName);
if (isVmax3) {
// If we did not find the storage group associated with the masking view, it could be
// the case that we were unexporting volumes and successfully deleted the masking view
// but failed at some point thereafter, and now the operation is being retried. If
// that is the case, then for VMAX3 we want to make sure that none of the volumes being
// unexported are still in non-parking storage groups. If we find such volumes, we remove
// them and add them to the parking storage group.
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
if (exportMask == null) {
// If we can't find the mask, there is really no cleanup we can do.
_log.warn("ExportMask {} no longer exists", exportMaskURI);
taskCompleter.ready(_dbClient);
return;
}
// See if any of the mask's volumes are still in a non-parking storage group. Map the
// volumes by group name.
List<URI> volumeURIs = ExportMaskUtils.getVolumeURIs(exportMask);
Map<String, List<URI>> volumesInNonParkingStorageGroup = _helper.getVolumesInNonParkingStorageGroup(storage, volumeURIs);
if (!volumesInNonParkingStorageGroup.isEmpty()) {
Map<String, Set<String>> volumeDeviceIdsMap = new HashMap<>();
for (Entry<String, List<URI>> storageGroupEntry : volumesInNonParkingStorageGroup.entrySet()) {
String storageGroupName = storageGroupEntry.getKey();
List<URI> storageGroupVolumeURIs = storageGroupEntry.getValue();
// If the storage group has multiple parents or is part of another masking view,
// then just skip its volumes; we cannot clean them up as we may impact other exports.
if (_helper.findStorageGroupsAssociatedWithMultipleParents(storage, storageGroupName) || _helper.findStorageGroupsAssociatedWithOtherMaskingViews(storage, storageGroupName)) {
_log.info("Storage group {} is associated with multiple paranets or other masking views", storageGroupName);
continue;
}
// Otherwise, remove the volumes from the storage group.
_log.info("Removing volumes {} from non parking storage group {}", storageGroupVolumeURIs, storageGroupName);
_helper.removeVolumesFromStorageGroup(storage, storageGroupName, storageGroupVolumeURIs, true);
// Collect each volume's device id by FAST policy so it can be added back to the appropriate
// parking storage group.
for (URI storageGroupVolumeURI : storageGroupVolumeURIs) {
Volume storageGroupVolume = _dbClient.queryObject(Volume.class, storageGroupVolumeURI);
if (storageGroupVolume != null) {
String policyName = ControllerUtils.getAutoTieringPolicyName(storageGroupVolumeURI, _dbClient);
String policyKey = _helper.getVMAX3FastSettingForVolume(storageGroupVolumeURI, policyName);
if (volumeDeviceIdsMap.containsKey(policyKey)) {
volumeDeviceIdsMap.get(policyKey).add(storageGroupVolume.getNativeId());
} else {
Set<String> volumeDeviceIds = new HashSet<>();
volumeDeviceIds.add(storageGroupVolume.getNativeId());
volumeDeviceIdsMap.put(policyKey, volumeDeviceIds);
}
}
}
}
// Finally, for each parking storage group policy, add the associated volumes to the corresponding parking storage group.
for (Entry<String, Set<String>> volumeDeviceIdsMapEntry : volumeDeviceIdsMap.entrySet()) {
_log.info("Adding volumes {} on system {} to parking storage group for policy {}", volumeDeviceIdsMapEntry.getValue(), storage.getNativeGuid(), volumeDeviceIdsMapEntry.getKey());
addVolumesToParkingStorageGroup(storage, volumeDeviceIdsMapEntry.getKey(), volumeDeviceIdsMapEntry.getValue());
}
}
}
taskCompleter.ready(_dbClient);
return;
}
/*
* If the masking view was created by another instance, we cannot delete it here during rollback.
* Hence, mark the task as done.
*/
if (taskCompleter instanceof RollbackExportGroupCreateCompleter) {
/*
* The purpose of rollback is to delete the masking view created by this very ViPR instance as
* part of this workflow; it should not delete a masking view created externally or by another
* ViPR instance.
*/
// Get the context from the task completer, in case this is a rollback.
ExportOperationContext context = (ExportOperationContext) WorkflowService.getInstance().loadStepData(taskCompleter.getOpId());
if (context != null) {
exportMaskRollback(storage, context, taskCompleter);
}
} else {
ExportOperationContext context = (ExportOperationContext) WorkflowService.getInstance().loadStepData(taskCompleter.getOpId());
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
List<URI> volumeURIs = ExportMaskUtils.getVolumeURIs(exportMask);
ExportMaskValidationContext ctx = new ExportMaskValidationContext();
ctx.setStorage(storage);
ctx.setExportMask(exportMask);
ctx.setBlockObjects(volumeURIList, _dbClient);
ctx.setInitiators(initiatorList);
ctx.setAllowExceptions(context == null);
validator.exportMaskDelete(ctx).validate();
if (!deleteMaskingView(storage, exportMaskURI, childGroupsByFast, taskCompleter)) {
// The task completer will already have been updated by the failed
// deleteMaskingView call. Simply return from here.
return;
}
for (Map.Entry<StorageGroupPolicyLimitsParam, List<String>> entry : childGroupsByFast.entrySet()) {
_log.info(String.format("Mask %s FAST Policy %s associated with %d Storage Group(s)", maskingViewName, entry.getKey(), entry.getValue().size()));
}
if (groupName != null) {
_log.info("storage group name : {}", groupName);
// delete the CSG explicitly (CTRL-9236)
CIMObjectPath storageGroupPath = _cimPath.getMaskingGroupPath(storage, groupName, SmisCommandHelper.MASKING_GROUP_TYPE.SE_DeviceMaskingGroup);
// Check if the storage group is shared before deleting it
if (_helper.checkExists(storage, storageGroupPath, false, false) != null && _helper.checkMaskingGroupShared(storage, storageGroupPath, exportMask.getMaskName())) {
// if the storage group is shared, don't delete the storage group
_log.info("The Storage group {} is shared, so it will not be deleted", groupName);
taskCompleter.ready(_dbClient);
return;
}
if (_helper.isCascadedSG(storage, storageGroupPath)) {
_helper.deleteMaskingGroup(storage, groupName, SmisCommandHelper.MASKING_GROUP_TYPE.SE_DeviceMaskingGroup);
}
/**
* After successful deletion of the masking view, try to remove the child Storage Groups that were
* part of the cascaded parent group. If Fast Policy is not enabled, those child groups can be removed.
* If Fast is enabled, check whether the child Storage Group is associated with more than one cascaded
* parent group; if so, we cannot delete the child Storage Group.
*/
for (Entry<StorageGroupPolicyLimitsParam, List<String>> childGroupByFastEntry : childGroupsByFast.entrySet()) {
for (String childGroupName : childGroupByFastEntry.getValue()) {
_log.info("Processing Group {} deletion with Fast Policy {}", childGroupName, childGroupByFastEntry.getKey());
CIMObjectPath maskingGroupPath = _cimPath.getMaskingGroupPath(storage, childGroupName, SmisCommandHelper.MASKING_GROUP_TYPE.SE_DeviceMaskingGroup);
if (!_helper.isFastPolicy(childGroupByFastEntry.getKey().getAutoTierPolicyName())) {
/**
* Remove the volumes from any phantom storage group (CTRL-8217).
*
* Volumes that are part of a phantom Storage Group will be in a non-CSG, non-FAST Storage Group.
*/
if (!_helper.isCascadedSG(storage, maskingGroupPath)) {
// Get volumes which are part of this Storage Group
List<URI> volumesInSG = _helper.findVolumesInStorageGroup(storage, childGroupName, volumeURIs);
// Flag to indicate whether we need to use the EMCForce flag on this operation.
// We currently use this flag when dealing with RP volumes, as they are tagged for RP
// and the operation on these volumes would fail otherwise.
boolean forceFlag = false;
for (URI volURI : volumesInSG) {
forceFlag = ExportUtils.useEMCForceFlag(_dbClient, volURI);
if (forceFlag) {
break;
}
}
removeVolumesFromPhantomStorageGroup(storage, client, exportMaskURI, volumesInSG, childGroupName, forceFlag);
}
// Delete the Storage Group
_helper.deleteMaskingGroup(storage, childGroupName, SmisCommandHelper.MASKING_GROUP_TYPE.SE_DeviceMaskingGroup);
} else if (!_helper.findStorageGroupsAssociatedWithMultipleParents(storage, childGroupName) && !_helper.findStorageGroupsAssociatedWithOtherMaskingViews(storage, childGroupName)) {
// volumeDeviceIds and policyName are required in the case of VMAX3 to add volumes back
// to the parking storage group.
Set<String> volumeDeviceIds = new HashSet<String>();
String policyName = childGroupByFastEntry.getKey().getAutoTierPolicyName();
if (isVmax3) {
volumeDeviceIds = _helper.getVolumeDeviceIdsFromStorageGroup(storage, childGroupName);
}
// Check whether anything else still holds the group; if yes, then we should not delete this group.
if (!isVmax3) {
_log.debug("Removing Storage Group {} from Fast Policy {}", childGroupName, childGroupByFastEntry.getKey());
_helper.removeVolumeGroupFromPolicyAndLimitsAssociation(client, storage, maskingGroupPath);
}
_log.debug("Deleting Storage Group {}", childGroupName);
_helper.deleteMaskingGroup(storage, childGroupName, SmisCommandHelper.MASKING_GROUP_TYPE.SE_DeviceMaskingGroup);
if (isVmax3 && !volumeDeviceIds.isEmpty()) {
// We need to add volumes back to appropriate parking storage group.
addVolumesToParkingStorageGroup(storage, policyName, volumeDeviceIds);
}
} else {
_log.info("Storage Group {} is either having more than one parent Storage Group or its part of another existing masking view", childGroupName);
// set Host IO Limits on SG which we reseted before deleting MV
if (childGroupByFastEntry.getKey().isHostIOLimitIOPsSet()) {
_helper.updateHostIOLimitIOPs(client, maskingGroupPath, childGroupByFastEntry.getKey().getHostIOLimitIOPs());
}
if (childGroupByFastEntry.getKey().isHostIOLimitBandwidthSet()) {
_helper.updateHostIOLimitBandwidth(client, maskingGroupPath, childGroupByFastEntry.getKey().getHostIOLimitBandwidth());
}
}
}
}
}
}
taskCompleter.ready(_dbClient);
} catch (Exception e) {
_log.error(String.format("deleteExportMask failed - maskName: %s", exportMaskURI.toString()), e);
ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
taskCompleter.error(_dbClient, serviceError);
}
_log.info("{} deleteExportMask END...", storage.getSerialNumber());
}
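A recurring piece of this method (and of the VNXe and VPLEX examples below) is the pre-delete validation: build an ExportMaskValidationContext, relax exceptions when the step runs as rollback, and validate before touching the array. A minimal sketch of that pattern, assuming the coprhd-controller types used above are imported; the helper name and its parameter list are illustrative, not part of the project:
// Sketch only: assumes the coprhd-controller classes used above (ExportMaskValidationContext,
// StorageSystem, ExportMask, Initiator, ExportOperationContext) plus the _dbClient and
// validator fields of the enclosing operations class. validateBeforeMaskDelete is a
// hypothetical helper name.
private void validateBeforeMaskDelete(StorageSystem storage, ExportMask exportMask,
        List<URI> volumeURIList, List<Initiator> initiatorList, ExportOperationContext context) {
    ExportMaskValidationContext ctx = new ExportMaskValidationContext();
    ctx.setStorage(storage);                       // array being operated on
    ctx.setExportMask(exportMask);                 // mask about to be deleted
    ctx.setBlockObjects(volumeURIList, _dbClient); // volumes ViPR expects in the mask
    ctx.setInitiators(initiatorList);              // initiators ViPR expects in the mask
    // With no rollback context, validation failures are fatal; during rollback
    // (context != null) they are tolerated so cleanup can proceed.
    ctx.setAllowExceptions(context == null);
    validator.exportMaskDelete(ctx).validate();
}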
Use of com.emc.storageos.volumecontroller.impl.utils.ExportOperationContext in project coprhd-controller by CoprHD.
The class VNXeExportOperations, method removeInitiators.
@Override
public void removeInitiators(StorageSystem storage, URI exportMask, List<URI> volumeURIList, List<Initiator> initiators, List<URI> targets, TaskCompleter taskCompleter) throws DeviceControllerException {
_logger.info("{} removeInitiators START...", storage.getSerialNumber());
ExportMask mask = _dbClient.queryObject(ExportMask.class, exportMask);
if (mask == null || mask.getInactive()) {
_logger.error(String.format("The exportMask %s is invalid.", exportMask));
throw DeviceControllerException.exceptions.invalidObjectNull();
}
boolean isRollback = WorkflowService.getInstance().isStepInRollbackState(taskCompleter.getOpId());
if (isRollback) {
List<Initiator> addedInitiators = new ArrayList<Initiator>();
// Get the context from the task completer, in case this is a rollback.
ExportOperationContext context = (ExportOperationContext) WorkflowService.getInstance().loadStepData(taskCompleter.getOpId());
if (context != null && context.getOperations() != null) {
_logger.info("Handling removeInitiators as a result of rollback");
ListIterator li = context.getOperations().listIterator(context.getOperations().size());
while (li.hasPrevious()) {
ExportOperationContextOperation operation = (ExportOperationContextOperation) li.previous();
if (operation != null && VNXeExportOperationContext.OPERATION_ADD_INITIATORS_TO_HOST.equals(operation.getOperation())) {
addedInitiators = (List<Initiator>) operation.getArgs().get(0);
_logger.info("Removing initiators {} as part of rollback", Joiner.on(',').join(addedInitiators));
}
}
}
// Update the initiators in the task completer such that we update the export mask/group correctly
for (Initiator initiator : initiators) {
if (addedInitiators == null || !addedInitiators.contains(initiator)) {
((ExportMaskRemoveInitiatorCompleter) taskCompleter).removeInitiator(initiator.getId());
}
}
initiators = addedInitiators;
if (initiators == null || initiators.isEmpty()) {
_logger.info("There was no context found for add initiator. So there is nothing to rollback.");
taskCompleter.ready(_dbClient);
return;
}
}
StringSet initiatorsInMask = mask.getInitiators();
List<Initiator> initiatorToBeRemoved = new ArrayList<>();
for (Initiator initiator : initiators) {
if (initiatorsInMask.contains(initiator.getId().toString())) {
initiatorToBeRemoved.add(initiator);
}
}
try {
VNXeApiClient apiClient = getVnxeClient(storage);
List<Initiator> allInitiators = ExportUtils.getExportMaskInitiators(exportMask, _dbClient);
String vnxeHostId = getHostIdFromInitiators(allInitiators, apiClient);
if (vnxeHostId != null) {
List<VNXeHostInitiator> vnxeInitiators = apiClient.getInitiatorsByHostId(vnxeHostId);
// initiators is a subset of allInitiators
Map<Initiator, VNXeHostInitiator> vnxeInitiatorsToBeRemoved = prepareInitiators(initiatorToBeRemoved);
Set<String> initiatorIds = new HashSet<String>();
for (VNXeHostInitiator vnxeInit : vnxeInitiators) {
initiatorIds.add(vnxeInit.getInitiatorId());
}
Set<String> initiatorsToBeRemoved = new HashSet<String>();
for (VNXeHostInitiator vnxeInit : vnxeInitiatorsToBeRemoved.values()) {
String initiatorId = vnxeInit.getId();
if (initiatorIds.remove(initiatorId)) {
initiatorsToBeRemoved.add(initiatorId);
}
}
ExportMaskValidationContext ctx = new ExportMaskValidationContext();
ctx.setStorage(storage);
ctx.setExportMask(mask);
ctx.setBlockObjects(volumeURIList, _dbClient);
// Allow exceptions to be thrown when not rolling back
ctx.setAllowExceptions(!isRollback);
AbstractVNXeValidator removeInitiatorsValidator = (AbstractVNXeValidator) validator.removeInitiators(ctx);
removeInitiatorsValidator.setHostId(vnxeHostId);
removeInitiatorsValidator.validate();
// If an initiator to be removed is shared by other export masks, all of those masks must have the
// same set of initiators; validate that before allowing the removal.
if (!isRollback) {
boolean hasSharedInitiator = false;
for (Initiator initiator : initiatorToBeRemoved) {
if (ExportUtils.isInitiatorSharedByMasks(_dbClient, mask, initiator.getId())) {
hasSharedInitiator = true;
break;
}
}
if (hasSharedInitiator) {
validateAllMasks(_dbClient, mask, apiClient, vnxeHostId);
}
}
}
List<String> initiatorIdList = new ArrayList<>();
for (Initiator initiator : initiatorToBeRemoved) {
_logger.info("Processing initiator {}", initiator.getLabel());
if (vnxeHostId != null) {
String initiatorId = initiator.getInitiatorPort();
if (Protocol.FC.name().equals(initiator.getProtocol())) {
initiatorId = initiator.getInitiatorNode() + ":" + initiatorId;
}
initiatorIdList.add(initiatorId);
}
mask.removeFromExistingInitiators(initiator);
mask.removeFromUserCreatedInitiators(initiator);
}
if (!initiatorIdList.isEmpty()) {
apiClient.deleteInitiators(initiatorIdList);
}
_dbClient.updateObject(mask);
taskCompleter.ready(_dbClient);
} catch (Exception e) {
_logger.error("Problem in removeInitiators: ", e);
ServiceError serviceError = DeviceControllerErrors.vnxe.jobFailed("removeInitiator", e.getMessage());
taskCompleter.error(_dbClient, serviceError);
}
_logger.info("{} removeInitiators END...", storage.getSerialNumber());
}
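The rollback branch above walks the recorded context operations in reverse to recover what the forward step actually added. The same loop appears in the other rollback examples on this page, so it can be factored into a small helper; the sketch below is an assumption-level refactoring (the helper name and generic parameter are not from the project), built only from calls shown in these examples:
// Sketch only: assumes the coprhd-controller types ExportOperationContext and
// ExportOperationContextOperation used above, plus java.util.ListIterator.
// findRollbackArgs is a hypothetical helper name.
@SuppressWarnings("unchecked")
private <T> List<T> findRollbackArgs(ExportOperationContext context, String operationKey) {
    List<T> args = null;
    if (context != null && context.getOperations() != null) {
        // Walk the recorded operations newest-first, mirroring the rollback loops above.
        ListIterator li = context.getOperations().listIterator(context.getOperations().size());
        while (li.hasPrevious()) {
            ExportOperationContextOperation operation = (ExportOperationContextOperation) li.previous();
            if (operation != null && operationKey.equals(operation.getOperation())) {
                // The first recorded argument for this key, e.g. the initiators added to the host.
                args = (List<T>) operation.getArgs().get(0);
            }
        }
    }
    return args;
}
With such a helper, the rollback branch above reduces to a single call such as findRollbackArgs(context, VNXeExportOperationContext.OPERATION_ADD_INITIATORS_TO_HOST).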
Use of com.emc.storageos.volumecontroller.impl.utils.ExportOperationContext in project coprhd-controller by CoprHD.
The class VNXeExportOperations, method deleteExportMask.
@Override
public void deleteExportMask(StorageSystem storage, URI exportMaskUri, List<URI> volumeURIList, List<URI> targetURIList, List<Initiator> initiatorList, TaskCompleter taskCompleter) throws DeviceControllerException {
_logger.info("{} deleteExportMask START...", storage.getSerialNumber());
boolean removeLastInitiator = false;
List<URI> volumesToBeUnmapped = new ArrayList<URI>();
try {
_logger.info("Export mask id: {}", exportMaskUri);
if (volumeURIList != null) {
_logger.info("deleteExportMask: volumes: {}", Joiner.on(',').join(volumeURIList));
}
if (targetURIList != null) {
_logger.info("deleteExportMask: assignments: {}", Joiner.on(',').join(targetURIList));
}
if (initiatorList != null) {
if (!initiatorList.isEmpty()) {
removeLastInitiator = true;
_logger.info("deleteExportMask: initiators: {}", Joiner.on(',').join(initiatorList));
}
}
// Get the context from the task completer, in case this is a rollback.
boolean isRollback = WorkflowService.getInstance().isStepInRollbackState(taskCompleter.getOpId());
if (isRollback) {
List<URI> addedVolumes = new ArrayList<URI>();
ExportOperationContext context = (ExportOperationContext) WorkflowService.getInstance().loadStepData(taskCompleter.getOpId());
if (context != null && context.getOperations() != null) {
_logger.info("Handling deleteExportMask as a result of rollback");
ListIterator li = context.getOperations().listIterator(context.getOperations().size());
while (li.hasPrevious()) {
ExportOperationContextOperation operation = (ExportOperationContextOperation) li.previous();
if (operation != null && VNXeExportOperationContext.OPERATION_ADD_VOLUMES_TO_HOST_EXPORT.equals(operation.getOperation())) {
addedVolumes = (List<URI>) operation.getArgs().get(0);
_logger.info("Removing volumes {} as part of rollback", Joiner.on(',').join(addedVolumes));
}
}
}
volumesToBeUnmapped = addedVolumes;
if (volumesToBeUnmapped == null || volumesToBeUnmapped.isEmpty()) {
_logger.info("There was no context found for add volumes. So there is nothing to rollback.");
taskCompleter.ready(_dbClient);
return;
}
} else {
volumesToBeUnmapped = volumeURIList;
}
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskUri);
if (exportMask == null || exportMask.getInactive()) {
throw new DeviceControllerException("Invalid ExportMask URI: " + exportMaskUri);
}
if (initiatorList.isEmpty()) {
initiatorList = ExportUtils.getExportMaskInitiators(exportMask, _dbClient);
}
VNXeApiClient apiClient = getVnxeClient(storage);
String hostId = getHostIdFromInitiators(initiatorList, apiClient);
Set<String> allExportedVolumes = new HashSet<>();
if (hostId != null) {
ExportMaskValidationContext ctx = new ExportMaskValidationContext();
ctx.setStorage(storage);
ctx.setExportMask(exportMask);
ctx.setBlockObjects(volumeURIList, _dbClient);
ctx.setInitiators(initiatorList);
// Allow exceptions to be thrown when not rolling back
ctx.setAllowExceptions(!isRollback);
AbstractVNXeValidator deleteMaskValidator = (AbstractVNXeValidator) validator.exportMaskDelete(ctx);
deleteMaskValidator.setHostId(hostId);
deleteMaskValidator.validate();
if (removeLastInitiator) {
ctx = new ExportMaskValidationContext();
ctx.setStorage(storage);
ctx.setExportMask(exportMask);
ctx.setBlockObjects(volumeURIList, _dbClient);
ctx.setAllowExceptions(!isRollback);
AbstractVNXeValidator removeInitiatorsValidator = (AbstractVNXeValidator) validator.removeInitiators(ctx);
removeInitiatorsValidator.setHostId(hostId);
removeInitiatorsValidator.validate();
boolean hasSharedInitiator = false;
for (String strUri : exportMask.getInitiators()) {
if (ExportUtils.isInitiatorSharedByMasks(_dbClient, exportMask, URI.create(strUri))) {
hasSharedInitiator = true;
_logger.info("Initiators are used by multiple export masks");
break;
}
}
if (hasSharedInitiator) {
// If any initiator is shared, all initiators have to be shared, and each mask should have the same set of initiators.
// Otherwise, removing the initiator will not be allowed; the user can delete the individual export masks instead.
Collection<ExportMask> masksWithSharedInitiators = validateAllMasks(_dbClient, exportMask, apiClient, hostId);
_logger.info("Masks use the same initiators {}", Joiner.on(", ").join(Collections2.transform(masksWithSharedInitiators, CommonTransformerFunctions.fctnDataObjectToForDisplay())));
// We need to unexport all volumes of all export masks, except when a shared export co-exists with an
// exclusive export; don't touch the exclusive export.
// In the case of multiple shared exports (e.g., with different projects), all exported LUNs will be
// unmapped, regardless of the exclusive export.
String exportType = ExportMaskUtils.getExportType(_dbClient, exportMask);
if (ExportGroupType.Cluster.name().equals(exportType)) {
Iterator<ExportMask> maskIter = masksWithSharedInitiators.iterator();
while (maskIter.hasNext()) {
ExportMask mask = maskIter.next();
if (!ExportGroupType.Cluster.name().equals(ExportMaskUtils.getExportType(_dbClient, mask))) {
_logger.info("Ignore exclusive export {}", mask.getMaskName());
maskIter.remove();
}
}
}
volumesToBeUnmapped.addAll(getExportedVolumes(_dbClient, storage.getId(), masksWithSharedInitiators));
}
}
allExportedVolumes = ExportUtils.getAllLUNsForHost(_dbClient, exportMask);
}
String opId = taskCompleter.getOpId();
Set<String> processedCGs = new HashSet<String>();
for (URI volUri : volumesToBeUnmapped) {
if (hostId != null) {
BlockObject blockObject = BlockObject.fetch(_dbClient, volUri);
String nativeId = blockObject.getNativeId();
String cgName = VNXeUtils.getBlockObjectCGName(blockObject, _dbClient);
if (cgName != null && !processedCGs.contains(cgName)) {
processedCGs.add(cgName);
VNXeUtils.getCGLock(workflowService, storage, cgName, opId);
}
if (URIUtil.isType(volUri, Volume.class)) {
apiClient.unexportLun(hostId, nativeId);
} else if (URIUtil.isType(volUri, BlockSnapshot.class)) {
if (BlockObject.checkForRP(_dbClient, volUri)) {
_logger.info(String.format("BlockObject %s is a RecoverPoint bookmark. Un-exporting associated lun %s instead of snap.", volUri, nativeId));
apiClient.unexportLun(hostId, nativeId);
} else {
apiClient.unexportSnap(hostId, nativeId);
setSnapWWN(apiClient, blockObject, nativeId);
}
}
}
// update the exportMask object
exportMask.removeVolume(volUri);
}
// Check if there are LUNs on the array.
// An initiator cannot be removed if there are LUNs belonging to other masks (if the initiator is shared) or LUNs unknown to ViPR.
Set<String> lunIds = new HashSet<>();
if (hostId != null) {
lunIds = apiClient.getHostLUNIds(hostId);
_logger.info("Mapped resources {}", Joiner.on(", ").join(lunIds));
}
boolean hasLUN = !lunIds.isEmpty();
lunIds.removeAll(allExportedVolumes);
boolean hasUnknownLUN = !lunIds.isEmpty();
_logger.info("Export mask deletion - hasLUN {}, hasUnknownLUN {}", hasLUN, hasUnknownLUN);
for (Initiator initiator : initiatorList) {
_logger.info("Processing initiator {}", initiator.getLabel());
if (hostId != null && (!hasLUN || (!hasUnknownLUN && !ExportUtils.isInitiatorSharedByMasks(_dbClient, exportMask, initiator.getId())))) {
String initiatorId = initiator.getInitiatorPort();
if (Protocol.FC.name().equals(initiator.getProtocol())) {
initiatorId = initiator.getInitiatorNode() + ":" + initiatorId;
}
try {
if (hasLUN) {
// move and delete initiator
apiClient.deleteInitiators(new ArrayList<String>(Arrays.asList(initiatorId)));
} else {
apiClient.deleteInitiator(initiatorId);
}
} catch (VNXeException e) {
_logger.warn("Error on deleting initiator: {}", e.getMessage());
}
}
exportMask.removeFromExistingInitiators(initiator);
exportMask.removeFromUserCreatedInitiators(initiator);
}
_dbClient.updateObject(exportMask);
if (hostId != null) {
List<VNXeHostInitiator> vnxeInitiators = apiClient.getInitiatorsByHostId(hostId);
if (vnxeInitiators.isEmpty()) {
Set<String> vnxeLUNIds = apiClient.getHostLUNIds(hostId);
if ((vnxeLUNIds.isEmpty())) {
try {
apiClient.deleteHost(hostId);
} catch (VNXeException e) {
_logger.warn("Error on deleting host: {}", e.getMessage());
}
}
}
}
List<ExportGroup> exportGroups = ExportMaskUtils.getExportGroups(_dbClient, exportMask);
if (exportGroups != null) {
// Remove the mask references in the export group
for (ExportGroup exportGroup : exportGroups) {
// Remove this mask from the export group
exportGroup.removeExportMask(exportMask.getId().toString());
}
// Update all of the export groups in the DB
_dbClient.updateObject(exportGroups);
}
taskCompleter.ready(_dbClient);
} catch (Exception e) {
_logger.error("Unexpected error: deleteExportMask failed.", e);
ServiceError error = DeviceControllerErrors.vnxe.jobFailed("deleteExportMask", e.getMessage());
taskCompleter.error(_dbClient, error);
}
_logger.info("{} deleteExportMask END...", storage.getSerialNumber());
}
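Whether an initiator can actually be deleted from the array host in the method above depends on three facts: whether the host still has mapped LUNs, whether any of those LUNs are unknown to ViPR, and whether other export masks share the initiator. The condition guarding the initiator-delete calls can be read as a small predicate; the sketch below only restates it (the helper name is an assumption):
// Sketch only: restates the guard used above before deleting an initiator from the array host.
// canRemoveInitiator is a hypothetical helper; ExportUtils and _dbClient are the
// coprhd-controller members used in the example.
private boolean canRemoveInitiator(boolean hasLUN, boolean hasUnknownLUN,
        ExportMask exportMask, Initiator initiator) {
    if (!hasLUN) {
        // Nothing is mapped to the host any more, so the initiator is safe to remove.
        return true;
    }
    // LUNs remain: only remove the initiator if every LUN is known to ViPR and no other
    // export mask still relies on this initiator.
    return !hasUnknownLUN
            && !ExportUtils.isInitiatorSharedByMasks(_dbClient, exportMask, initiator.getId());
}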
Use of com.emc.storageos.volumecontroller.impl.utils.ExportOperationContext in project coprhd-controller by CoprHD.
The class VNXeExportOperations, method createExportMask.
@Override
public void createExportMask(StorageSystem storage, URI exportMask, VolumeURIHLU[] volumeURIHLUs, List<URI> targetURIList, List<Initiator> initiatorList, TaskCompleter taskCompleter) throws DeviceControllerException {
_logger.info("{} createExportMask START...", storage.getSerialNumber());
VNXeApiClient apiClient = getVnxeClient(storage);
List<URI> mappedVolumes = new ArrayList<URI>();
ExportMask mask = null;
try {
_logger.info("createExportMask: Export mask id: {}", exportMask);
_logger.info("createExportMask: volume-HLU pairs: {}", Joiner.on(',').join(volumeURIHLUs));
_logger.info("createExportMask: initiators: {}", Joiner.on(',').join(initiatorList));
_logger.info("createExportMask: assignments: {}", Joiner.on(',').join(targetURIList));
ExportOperationContext context = new VNXeExportOperationContext();
taskCompleter.updateWorkflowStepContext(context);
mask = _dbClient.queryObject(ExportMask.class, exportMask);
if (mask == null || mask.getInactive()) {
throw new DeviceControllerException("Invalid ExportMask URI: " + exportMask);
}
Set<String> processedCGs = new HashSet<String>();
Collection<VNXeHostInitiator> initiators = prepareInitiators(initiatorList).values();
VNXeBase host = apiClient.prepareHostsForExport(initiators);
validateInitiators(_dbClient, initiatorList, apiClient, host.getId());
String opId = taskCompleter.getOpId();
for (VolumeURIHLU volURIHLU : volumeURIHLUs) {
URI volUri = volURIHLU.getVolumeURI();
String hlu = volURIHLU.getHLU();
_logger.info(String.format("hlu %s", hlu));
BlockObject blockObject = BlockObject.fetch(_dbClient, volUri);
String nativeId = blockObject.getNativeId();
VNXeExportResult result = null;
Integer newhlu = -1;
if (hlu != null && !hlu.isEmpty() && !hlu.equals(ExportGroup.LUN_UNASSIGNED_STR)) {
newhlu = Integer.valueOf(hlu);
}
String cgName = VNXeUtils.getBlockObjectCGName(blockObject, _dbClient);
if (cgName != null && !processedCGs.contains(cgName)) {
processedCGs.add(cgName);
VNXeUtils.getCGLock(workflowService, storage, cgName, opId);
}
if (URIUtil.isType(volUri, Volume.class)) {
result = apiClient.exportLun(host, nativeId, newhlu);
mask.addVolume(volUri, result.getHlu());
if (result.isNewAccess()) {
mappedVolumes.add(volUri);
}
} else if (URIUtil.isType(volUri, BlockSnapshot.class)) {
if (BlockObject.checkForRP(_dbClient, volUri)) {
_logger.info(String.format("BlockObject %s is a RecoverPoint bookmark. Exporting associated lun %s instead of snap.", volUri, nativeId));
result = apiClient.exportLun(host, nativeId, newhlu);
} else {
result = apiClient.exportSnap(host, nativeId, newhlu);
setSnapWWN(apiClient, blockObject, nativeId);
}
mask.addVolume(volUri, result.getHlu());
if (result.isNewAccess()) {
mappedVolumes.add(volUri);
}
}
}
ExportOperationContext.insertContextOperation(taskCompleter, VNXeExportOperationContext.OPERATION_ADD_VOLUMES_TO_HOST_EXPORT, mappedVolumes);
mask.setNativeId(host.getId());
_dbClient.updateObject(mask);
taskCompleter.ready(_dbClient);
} catch (Exception e) {
_logger.error("Unexpected error: createExportMask failed.", e);
ServiceError error = DeviceControllerErrors.vnxe.jobFailed("createExportMask", e.getMessage());
taskCompleter.error(_dbClient, error);
}
_logger.info("{} createExportMask END...", storage.getSerialNumber());
}
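This method is the producer side of ExportOperationContext: the context is attached to the workflow step before any array call, and only the resources that were actually newly mapped are recorded, so the rollback branches in the examples above undo exactly that set. A condensed sketch of the two halves of that pattern, using only calls that appear in this example (the helper names are illustrative):
// Sketch only: assumes the coprhd-controller types used above (ExportOperationContext,
// VNXeExportOperationContext, TaskCompleter, URI). Helper names are hypothetical.
private ExportOperationContext beginStepContext(TaskCompleter taskCompleter) {
    ExportOperationContext context = new VNXeExportOperationContext();
    // Attach the (still empty) context to the workflow step so a later rollback can
    // retrieve it via WorkflowService.getInstance().loadStepData(taskCompleter.getOpId()).
    taskCompleter.updateWorkflowStepContext(context);
    return context;
}

private void recordMappedVolumes(TaskCompleter taskCompleter, List<URI> mappedVolumes) {
    // Record only the volumes that were actually newly mapped, under a well-known key,
    // so rollback removes exactly that set and nothing more.
    ExportOperationContext.insertContextOperation(taskCompleter,
            VNXeExportOperationContext.OPERATION_ADD_VOLUMES_TO_HOST_EXPORT, mappedVolumes);
}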
Use of com.emc.storageos.volumecontroller.impl.utils.ExportOperationContext in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method storageViewRemoveInitiators.
/**
* Workflow step to remove an initiator from a single Storage View as given by the ExportMask URI.
* Note there is a dependence on ExportMask name equaling the Storage View name.
* Note that arguments must match storageViewRemoveInitiatorsMethod above (except stepId).
*
* @param vplexURI
* -- URI of Vplex Storage System.
* @param exportGroupURI
* -- URI of Export Group.
* @param exportMaskURI
-- URI of one ExportMask. Call only processes the indicated mask.
* @param initiatorURIs
* -- URIs of Initiators to be removed.
* @param targetURIs
* -- optional targets to be removed from the Storage View.
* If non null, a list of URIs for VPlex front-end ports that will be removed from Storage View.
* @param taskCompleter
* -- the task completer, used to find the rollback context,
* which will be non-null in the case of rollback
* @param rollbackContextKey
* context key for rollback processing
* @param stepId
* -- Workflow step id.
* @throws WorkflowException
*/
public void storageViewRemoveInitiators(URI vplexURI, URI exportGroupURI, URI exportMaskURI, List<URI> initiatorURIs, List<URI> targetURIs, TaskCompleter taskCompleter, String rollbackContextKey, String stepId) throws WorkflowException {
ExportMaskRemoveInitiatorCompleter completer = null;
try {
WorkflowStepCompleter.stepExecuting(stepId);
List<URI> initiatorIdsToProcess = new ArrayList<>(initiatorURIs);
completer = new ExportMaskRemoveInitiatorCompleter(exportGroupURI, exportMaskURI, initiatorURIs, stepId);
StorageSystem vplex = getDataObject(StorageSystem.class, vplexURI, _dbClient);
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplex, _dbClient);
String vplexClusterName = VPlexUtil.getVplexClusterName(exportMask, vplexURI, client, _dbClient);
Map<String, String> targetPortMap = VPlexControllerUtils.getTargetPortToPwwnMap(client, vplexClusterName);
VPlexStorageViewInfo storageView = client.getStorageView(vplexClusterName, exportMask.getMaskName());
_log.info("Refreshing ExportMask {}", exportMask.getMaskName());
VPlexControllerUtils.refreshExportMask(_dbClient, storageView, exportMask, targetPortMap, _networkDeviceController);
// get the context from the task completer, in case this is a rollback.
if (taskCompleter != null && rollbackContextKey != null) {
ExportOperationContext context = (ExportOperationContext) WorkflowService.getInstance().loadStepData(rollbackContextKey);
if (context != null) {
// a non-null context means this step is running as part of a rollback.
List<URI> addedInitiators = new ArrayList<>();
if (context.getOperations() != null) {
_log.info("Handling removeInitiators as a result of rollback");
ListIterator<ExportOperationContextOperation> li = context.getOperations().listIterator(context.getOperations().size());
while (li.hasPrevious()) {
ExportOperationContextOperation operation = (ExportOperationContextOperation) li.previous();
if (operation != null && VplexExportOperationContext.OPERATION_ADD_INITIATORS_TO_STORAGE_VIEW.equals(operation.getOperation())) {
addedInitiators = (List<URI>) operation.getArgs().get(0);
_log.info("Removing initiators {} as part of rollback", Joiner.on(',').join(addedInitiators));
}
}
}
// Update the initiators in the task completer such that we update the export mask/group correctly
for (URI initiator : initiatorIdsToProcess) {
if (addedInitiators == null || !addedInitiators.contains(initiator)) {
completer.removeInitiator(initiator);
}
}
if (addedInitiators == null || addedInitiators.isEmpty()) {
_log.info("There was no context found for add initiator. So there is nothing to rollback.");
completer.ready(_dbClient);
return;
}
// Change the list of initiators to process to the list
// that successfully were added during addInitiators.
initiatorIdsToProcess.clear();
initiatorIdsToProcess.addAll(addedInitiators);
}
}
// validate the remove initiator operation against the export mask volumes
List<URI> volumeURIList = (exportMask.getUserAddedVolumes() != null) ? URIUtil.toURIList(exportMask.getUserAddedVolumes().values()) : new ArrayList<URI>();
if (volumeURIList.isEmpty()) {
_log.warn("volume URI list for validating remove initiators is empty...");
}
ExportMaskValidationContext ctx = new ExportMaskValidationContext();
ctx.setStorage(vplex);
ctx.setExportMask(exportMask);
ctx.setBlockObjects(volumeURIList, _dbClient);
ctx.setAllowExceptions(!WorkflowService.getInstance().isStepInRollbackState(stepId));
validator.removeInitiators(ctx).validate();
// Invoke artificial failure to simulate invalid storageview name on vplex
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_060);
// Only remove targets when the mask has no existing (externally created) initiators or volumes;
// otherwise we would be removing all storage ports but leaving the existing initiators and volumes.
if (!exportMask.hasAnyExistingInitiators() && !exportMask.hasAnyExistingVolumes()) {
if (targetURIs != null && targetURIs.isEmpty() == false) {
List<PortInfo> targetPortInfos = new ArrayList<PortInfo>();
List<URI> targetsAddedToStorageView = new ArrayList<URI>();
for (URI target : targetURIs) {
// Do not try to remove a port twice.
if (!exportMask.getStoragePorts().contains(target.toString())) {
continue;
}
// Build the PortInfo structure for the port to be removed
StoragePort port = getDataObject(StoragePort.class, target, _dbClient);
PortInfo pi = new PortInfo(port.getPortNetworkId().toUpperCase().replaceAll(":", ""), null, port.getPortName(), null);
targetPortInfos.add(pi);
targetsAddedToStorageView.add(target);
}
if (!targetPortInfos.isEmpty()) {
// Remove the targets from the VPLEX
client.removeTargetsFromStorageView(exportMask.getMaskName(), targetPortInfos);
}
}
}
// Update the initiators in the ExportMask.
List<PortInfo> initiatorPortInfo = new ArrayList<PortInfo>();
for (URI initiatorURI : initiatorIdsToProcess) {
Initiator initiator = getDataObject(Initiator.class, initiatorURI, _dbClient);
// We don't want to remove an existing initiator unless this is a rollback step
if (exportMask.hasExistingInitiator(initiator) && !WorkflowService.getInstance().isStepInRollbackState(stepId)) {
continue;
}
PortInfo portInfo = new PortInfo(initiator.getInitiatorPort().toUpperCase().replaceAll(":", ""), initiator.getInitiatorNode().toUpperCase().replaceAll(":", ""), initiator.getLabel(), getVPlexInitiatorType(initiator));
initiatorPortInfo.add(portInfo);
}
// Remove the initiators if there aren't any existing volumes, unless this is a rollback step or validation is disabled.
if (!initiatorPortInfo.isEmpty() && (!exportMask.hasAnyExistingVolumes() || !validatorConfig.isValidationEnabled() || WorkflowService.getInstance().isStepInRollbackState(stepId))) {
String lockName = null;
boolean lockAcquired = false;
try {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
String clusterId = ConnectivityUtil.getVplexClusterForVarray(exportGroup.getVirtualArray(), vplexURI, _dbClient);
lockName = _vplexApiLockManager.getLockName(vplexURI, clusterId);
lockAcquired = _vplexApiLockManager.acquireLock(lockName, LockTimeoutValue.get(LockType.VPLEX_API_LIB));
if (!lockAcquired) {
throw VPlexApiException.exceptions.couldNotObtainConcurrencyLock(vplex.getLabel());
}
// Remove the initiators from the VPLEX storage view
// Test mechanism to invoke a failure. No-op on production systems.
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_016);
client.removeInitiatorsFromStorageView(exportMask.getMaskName(), vplexClusterName, initiatorPortInfo);
} finally {
if (lockAcquired) {
_vplexApiLockManager.releaseLock(lockName);
}
}
}
completer.ready(_dbClient);
} catch (VPlexApiException vae) {
_log.error("Exception removing initiator from Storage View: " + vae.getMessage(), vae);
failStep(completer, stepId, vae);
} catch (Exception ex) {
_log.error("Exception removing initiator from Storage View: " + ex.getMessage(), ex);
String opName = ResourceOperationTypeEnum.DELETE_STORAGE_VIEW_INITIATOR.getName();
ServiceError serviceError = VPlexApiException.errors.storageViewRemoveInitiatorFailed(opName, ex);
failStep(completer, stepId, serviceError);
}
}
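The storage-view mutation above is bracketed by a cluster-scoped lock so that concurrent workflows cannot modify the same VPLEX storage view at once. The acquire/check/release shape of that block, extracted as a standalone sketch (the helper name and Runnable parameter are assumptions; the lock-manager and exception calls are the ones used above):
// Sketch only: the lock pattern used above around the VPLEX API call. withVplexClusterLock
// is a hypothetical helper; _vplexApiLockManager, LockTimeoutValue, LockType and
// VPlexApiException are the coprhd-controller types used in the example.
private void withVplexClusterLock(URI vplexURI, String clusterId, StorageSystem vplex, Runnable apiCall)
        throws VPlexApiException {
    String lockName = _vplexApiLockManager.getLockName(vplexURI, clusterId);
    boolean lockAcquired = false;
    try {
        lockAcquired = _vplexApiLockManager.acquireLock(lockName, LockTimeoutValue.get(LockType.VPLEX_API_LIB));
        if (!lockAcquired) {
            // Fail fast rather than mutating the storage view without the lock.
            throw VPlexApiException.exceptions.couldNotObtainConcurrencyLock(vplex.getLabel());
        }
        apiCall.run();
    } finally {
        if (lockAcquired) {
            _vplexApiLockManager.releaseLock(lockName);
        }
    }
}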