use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method applyVolumesToMasksUsingRule.
/**
* Apply business rules to "add" volumes to specific export masks.
*
* Currently implemented rules:
* Rule 1. If you find an exact match of your volume with a mask's policy, use it.
* Rule 2. If you find a mask with multiple policies using cascaded storage groups, use it.
* Rule 3. If you find a mask with a non-cascaded, non-FAST storage group, use it.
* (phantom will be searched/created in this case)
*
* @param exportGroup export group
* @param token task id
* @param existingMasksToUpdateWithNewVolumes masks to update with new volumes if criteria is met
* @param volumesWithNoMask a map that empties as we find homes for volumes
* @param masks masks associated with the initiator
* @param maskToInitiatorsMap map of export masks to the initiators they need to cover
* @param partialMasks list of masks that contain a subset of initiators for the compute resource requested
* @param rule rule number from above
* @return true if the method succeeded in searching for homes for all of the volumes; false if a fatal error occurred.
*/
private boolean applyVolumesToMasksUsingRule(ExportGroup exportGroup, String token, Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes, Map<URI, Map<URI, Integer>> volumesWithNoMask, Map<ExportMask, ExportMaskPolicy> masks, Map<URI, Set<Initiator>> maskToInitiatorsMap, Set<URI> partialMasks, int rule, boolean isVMAX3) {
// populate a map of mask to initiator ID for the analysis loop.
Map<URI, Set<URI>> maskToInitiatorsToAddMap = new HashMap<URI, Set<URI>>();
if (maskToInitiatorsMap != null) {
for (Entry<URI, Set<Initiator>> entry : maskToInitiatorsMap.entrySet()) {
for (Initiator initiator : entry.getValue()) {
if (!maskToInitiatorsToAddMap.containsKey(entry.getKey())) {
maskToInitiatorsToAddMap.put(entry.getKey(), new HashSet<URI>());
}
maskToInitiatorsToAddMap.get(entry.getKey()).add(initiator.getId());
}
}
}
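// Track, per initiator, the volumes for which a home mask has been found; matching
// entries are drained from volumesWithNoMask at the bottom of the mask loop.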
ListMultimap<URI, URI> volumesWithMask = ArrayListMultimap.create();
for (ExportMask mask : ExportMaskUtils.sortMasksByEligibility(masks, exportGroup)) {
// We need to see if the volume also exists in the mask;
// if it doesn't, then we'll add it to the list of volumes to add.
ExportMaskPolicy policy = masks.get(mask);
for (URI initiatorId : volumesWithNoMask.keySet()) {
// Check to ensure the initiator is in this mask or in the list of initiators we intend to add to this mask.
if ((mask.getInitiators() == null || !mask.getInitiators().contains(initiatorId.toString())) && (!maskToInitiatorsToAddMap.containsKey(mask.getId()) || !maskToInitiatorsToAddMap.get(mask.getId()).contains(initiatorId))) {
continue;
}
Map<URI, VirtualPool> uriVirtualPoolMap = new HashMap<URI, VirtualPool>();
for (URI boURI : volumesWithNoMask.get(initiatorId).keySet()) {
BlockObject bo = Volume.fetchExportMaskBlockObject(_dbClient, boURI);
if (bo != null && !mask.hasExistingVolume(bo)) {
// Skip if the volumesWithMask object already reflects that the volume is covered.
if (volumesWithMask.containsKey(initiatorId) && volumesWithMask.get(initiatorId).contains(boURI)) {
continue;
}
// Make sure the mask matches the fast policy of the volume
boolean match = false;
// If the mask is already slated to receive this volume, set the match flag to true so the volumesWithMask will get marked properly.
if (existingMasksToUpdateWithNewVolumes.containsKey(mask.getId()) && existingMasksToUpdateWithNewVolumes.get(mask.getId()).containsKey(boURI)) {
match = true;
} else {
List<Initiator> initiators = _dbClient.queryObjectField(Initiator.class, "iniport", Arrays.asList(initiatorId));
Initiator initiator = initiators.get(0);
_log.info(String.format("Pre-existing Mask Rule %d: volume %s is not exposed to initiator %s in mask %s. Checking rule.", rule, bo.getLabel(), initiator.getInitiatorPort(), mask.getMaskName()));
// Check if the requested HLU for the volume is
// already taken by a pre-existing volume.
Integer requestedHLU = volumesWithNoMask.get(initiatorId).get(boURI);
StringMap existingVolumesInMask = mask.getExistingVolumes();
if (existingVolumesInMask != null && requestedHLU.intValue() != ExportGroup.LUN_UNASSIGNED && !ExportGroup.LUN_UNASSIGNED_DECIMAL_STR.equals(requestedHLU.toString()) && existingVolumesInMask.containsValue(requestedHLU.toString())) {
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
ServiceError serviceError = DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(boURI.toString(), requestedHLU.toString());
completer.error(_dbClient, serviceError);
return false;
}
String volumePolicyName = ControllerUtils.getAutoTieringPolicyName(bo.getId(), _dbClient);
if (volumePolicyName.equalsIgnoreCase(Constants.NONE.toString())) {
volumePolicyName = null;
}
VirtualPool virtualPool = null;
if (bo instanceof Volume) {
Volume volume = (Volume) bo;
virtualPool = uriVirtualPoolMap.get(volume.getVirtualPool());
if (virtualPool == null) {
virtualPool = _dbClient.queryObject(VirtualPool.class, volume.getVirtualPool());
uriVirtualPoolMap.put(volume.getVirtualPool(), virtualPool);
}
}
if (rule == 1) {
// Rule 1 applies when at least one of the following holds:
// 1. The mask is not a partial mask, i.e. it contains all the initiators associated with the compute resource
// 2. The partial masks for the compute resource all share the same storage group
// 3. The volume is NON-FAST
if (!partialMasks.contains(mask.getId()) || partialMasksContainSameSG(partialMasks, masks, mask) || volumePolicyName == null) {
// Exact fit case: no FAST policy (or, for VMAX3, the MV is associated with the Optimized SLO)
if (volumePolicyName == null && (policy.localTierPolicy == null || (isVMAX3 && policy.localTierPolicy.contains(Constants.OPTIMIZED_SLO)))) {
_log.info("Pre-existing Mask Matched rule 1B: volume and mask do not have FAST policy");
match = true;
}
// Exact fit case, FAST policy with non-cascading storage group
if (volumePolicyName != null) {
if (policy.localTierPolicy != null) {
if (isVMAX3) {
match = SmisUtils.checkPolicyMatchForVMAX3(policy.localTierPolicy, volumePolicyName);
} else {
match = policy.localTierPolicy.equalsIgnoreCase(volumePolicyName);
}
if (match) {
_log.info("Pre-existing Mask Matched rule 1C: volume has same FAST policy as masking view with non-cascading storage group");
}
}
// Cascaded storage group with a single FAST policy: if that policy matches the volume's, it's ours.
if (policy.localTierPolicy == null && policy.tierPolicies != null && policy.tierPolicies.size() == 1) {
if (isVMAX3) {
String policyName = policy.tierPolicies.iterator().next();
match = SmisUtils.checkPolicyMatchForVMAX3(policyName, volumePolicyName);
} else {
match = policy.tierPolicies.contains(volumePolicyName);
}
if (match) {
_log.info("Pre-existing Mask Matched rule 1D: volume has same FAST policy as masking view with cascading storage group");
}
}
}
// verify host io limits match if policy name is a match
if (virtualPool != null) {
match &= HostIOLimitsParam.isEqualsLimit(policy.getHostIOLimitBandwidth(), virtualPool.getHostIOLimitBandwidth()) && HostIOLimitsParam.isEqualsLimit(policy.getHostIOLimitIOPs(), virtualPool.getHostIOLimitIOPs());
}
} else {
_log.info("Pre-existing Mask did not match rule 1A: volume is FAST, mask comprises only part of the compute resource, and the storage groups in each mask are not the same. " + "Attempting to use this mask would cause a violation on the VMAX since the same volume can not be in more than one storage group with a FAST policy defined.");
}
}
// Rule 2: the volume has a FAST policy and the masking view is non-simple, i.e. it's using cascaded storage groups.
if (rule == 2) {
// VMAX3: Phantom SGs are not created for VMAX3, so ignore the mask
if (!policy.simpleMask && checkIfRule2SatisfiesForVMAX3(isVMAX3, policy)) {
_log.info("Pre-existing mask Matched rule 2A: volume has FAST policy and masking view has cascaded storage group");
// For phantom-type exports, verify that the expected HostIO limit is set on the SG within the MV; if not, we need to create a new masking view.
if (ExportMaskPolicy.EXPORT_TYPE.PHANTOM.name().equalsIgnoreCase(policy.getExportType())) {
if (virtualPool != null) {
if (HostIOLimitsParam.isEqualsLimit(policy.getHostIOLimitBandwidth(), virtualPool.getHostIOLimitBandwidth()) && HostIOLimitsParam.isEqualsLimit(policy.getHostIOLimitIOPs(), virtualPool.getHostIOLimitIOPs())) {
_log.info("Pre-existing mask Matched rule 2A-1: Phantom SGs are available to add FAST volumes to this masking view, and expected HostIO limit is set on SG within masking view.");
match = true;
} else {
_log.info("Pre-existing mask did not match rule 2A-1: Phantom SGs are available to add FAST volumes to this masking view, but HostIO limit is not set on SG within masking view.");
}
}
} else {
match = true;
}
} else {
if (volumePolicyName == null) {
_log.info("Pre-existing mask did not match rule 2A: volume does not have a FAST policy, and this rules requires the volume to have a FAST policy associated with it");
}
if (policy.simpleMask) {
_log.info("Pre-existing mask did not match rule 2A: mask has a cascaded storage group, and this rule requires the storage group be non-cascaded in the mask");
}
}
}
// Rule 3 (non-VMAX3 only): the mask has a non-cascaded, non-FAST storage group, which means we're capable of creating phantom storage groups.
if (!isVMAX3 && rule == 3) {
if (volumePolicyName != null) {
if ((policy.tierPolicies == null || policy.tierPolicies.isEmpty()) && policy.simpleMask) {
_log.info("Pre-existing mask Matched rule 3A: volume has non-cascaded, non-FAST storage group, allowing VipR to make/use island storage groups for FAST");
match = true;
// verify host io limits match if policy name is a match
if (virtualPool != null) {
match = HostIOLimitsParam.isEqualsLimit(policy.getHostIOLimitBandwidth(), virtualPool.getHostIOLimitBandwidth()) && HostIOLimitsParam.isEqualsLimit(policy.getHostIOLimitIOPs(), virtualPool.getHostIOLimitIOPs());
}
} else {
_log.info("Pre-existing mask did not match rule 3A: volume is FAST and mask does not have a non-cascaded, non-FAST storage group. A non-cascaded, non-FAST storage group in the masking view allows ViPR to " + "create or use a separate island storage group for FAST volumes");
}
} else {
_log.info("Pre-existing mask did not match rule 3A: volume does not have a FAST policy, and this rule requires the volume to have a FAST policy associated with it");
}
}
if (match) {
_log.info(String.format("Found that we can add volume %s to export mask %s", bo.getLabel(), mask.getMaskName()));
// The volume doesn't exist, so we have to add it to
// the masking container.
Map<URI, Integer> newVolumes = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (newVolumes == null) {
newVolumes = new HashMap<URI, Integer>();
existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumes);
}
// Check to see if the volume is already in this mask. (Map hashcode not finding existing volume URIs)
if (!newVolumes.containsKey(bo.getId())) {
newVolumes.put(bo.getId(), requestedHLU);
mask.addToUserCreatedVolumes(bo);
} else {
_log.info(String.format("Found we already have volume %s in the list for mask %s", bo.getLabel(), mask.getMaskName()));
}
}
}
if (match) {
// We found a mask for this volume; mark it for removal from the no-mask-yet list
volumesWithMask.put(initiatorId, boURI);
}
} else if (bo != null && mask.hasExistingVolume(bo)) {
// We found a mask for this volume; mark it for removal from the no-mask-yet list
_log.info(String.format("rule %d: according to the database, volume %s is already in the mask: %s", rule, bo.getWWN(), mask.getMaskName()));
volumesWithMask.put(initiatorId, boURI);
}
}
}
// Update the list of volumes and initiators for the mask
Map<URI, Integer> volumeMapForExistingMask = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
mask.addVolumes(volumeMapForExistingMask);
}
// Remove the entries from the no-mask-yet map
for (Entry<URI, Collection<URI>> entry : volumesWithMask.asMap().entrySet()) {
URI initiatorId = entry.getKey();
if (volumesWithNoMask != null && volumesWithNoMask.get(initiatorId) != null) {
for (URI boId : entry.getValue()) {
if (volumesWithNoMask.get(initiatorId) != null) {
volumesWithNoMask.get(initiatorId).remove(boId);
if (volumesWithNoMask.get(initiatorId).isEmpty()) {
volumesWithNoMask.remove(initiatorId);
}
}
}
}
}
}
return true;
}
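The method above evaluates one rule per call, returning false only on a fatal error while volumesWithNoMask shrinks as volumes find homes. Below is a minimal sketch of how a caller might sweep the rules from most to least specific; the driver shape and the method name applyRulesInOrder are hypothetical, as the real call site is not shown in this snippet.

private boolean applyRulesInOrder(ExportGroup exportGroup, String token,
        Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes,
        Map<URI, Map<URI, Integer>> volumesWithNoMask,
        Map<ExportMask, ExportMaskPolicy> masks,
        Map<URI, Set<Initiator>> maskToInitiatorsMap,
        Set<URI> partialMasks, boolean isVMAX3) {
    // Hypothetical driver (not from the source): apply rules 1..3 in order of
    // strictness until every volume has found a home or a fatal error occurs.
    for (int rule = 1; rule <= 3; rule++) {
        if (volumesWithNoMask.isEmpty()) {
            break; // every volume already found a home under a stricter rule
        }
        if (!applyVolumesToMasksUsingRule(exportGroup, token, existingMasksToUpdateWithNewVolumes,
                volumesWithNoMask, masks, maskToInitiatorsMap, partialMasks, rule, isVMAX3)) {
            return false; // the completer has already marked the task as failed
        }
    }
    return true;
}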
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class VnxMaskingOrchestrator method determineExportGroupCreateSteps.
/**
* Routine contains logic to create an export mask on the array
*
* @param workflow
* - Workflow object to create steps against
* @param waitFor
* - [optional] Identifier of workflow step to wait for
* @param device
* - BlockStorageDevice implementation
* @param storage
* - StorageSystem object representing the underlying array
* @param exportGroup
* - ExportGroup object representing Bourne-level masking
* @param initiatorURIs
* - List of Initiator URIs
* @param volumeMap
* - Map of Volume URIs to requested Integer HLUs
* @param zoningStepNeeded
* - No specific logic required for VNX as zoning is taken care of already.
* @param token
* - Identifier for the operation
* @return true if any workflow steps were created; false otherwise
* @throws Exception
*/
@Override
public boolean determineExportGroupCreateSteps(Workflow workflow, String waitFor, BlockStorageDevice device, StorageSystem storage, ExportGroup exportGroup, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, boolean zoningStepNeeded, String token) throws Exception {
// If we didn't create any workflow steps by the end of this method, we can raise an
// appropriate exception (instead of the Task just hanging)
String previousStep = waitFor;
boolean flowCreated = false;
Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
List<URI> volumeURIs = new ArrayList<URI>();
volumeURIs.addAll(volumeMap.keySet());
Map<URI, URI> hostToExistingExportMaskMap = new HashMap<URI, URI>();
List<URI> hostURIs = new ArrayList<URI>();
List<String> portNames = new ArrayList<String>();
// Populate the port WWN/IQNs (portNames) and the
// mapping of the WWN/IQNs to Initiator URIs
processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
// We always want to have the full list of initiators for the hosts involved in
// this export. This will allow the export operation to always find any
// existing exports for a given host.
queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);
// Find the export masks that are associated with any or all the ports in
// portNames. We will have to do processing differently based on whether
// or not there are existing ExportMasks.
Map<String, Set<URI>> matchingExportMaskURIs = device.findExportMasks(storage, portNames, false);
/**
* COP-28674: During Vblock boot volume export, if existing masking views are found, then check for existing volumes.
* If found, throw an exception. This condition is valid only for Vblock boot volume export.
*/
if (exportGroup.forHost() && ExportMaskUtils.isVblockHost(initiatorURIs, _dbClient) && ExportMaskUtils.isBootVolume(_dbClient, volumeMap)) {
_log.info("VBlock boot volume Export: Validating the storage system {} to find existing storage groups", storage.getNativeGuid());
if (CollectionUtils.isEmpty(matchingExportMaskURIs)) {
_log.info("No existing masking views found, passed validation..");
} else {
List<String> maskNames = new ArrayList<String>();
for (Entry<String, Set<URI>> maskEntry : matchingExportMaskURIs.entrySet()) {
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, maskEntry.getValue());
if (!CollectionUtils.isEmpty(masks)) {
for (ExportMask mask : masks) {
maskNames.add(mask.getMaskName());
}
}
}
InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
Map<String, List<URI>> initiatorToComputeResourceMap = initiatorHelper.getResourceToInitiators();
Set<String> computeResourceSet = initiatorToComputeResourceMap.keySet();
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
ServiceError serviceError = DeviceControllerException.errors.existingMaskFoundDuringBootVolumeExport(Joiner.on(",").join(maskNames), computeResourceSet.iterator().next());
completer.error(_dbClient, serviceError);
return false;
}
} else {
_log.info("VBlock Boot volume Export Validation : Skipping");
}
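// For cluster exports, find HLUs that are free across the whole cluster and update
// the requested volume map accordingly (behavior inferred from the method name).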
findAndUpdateFreeHLUsForClusterExport(storage, exportGroup, initiatorURIs, volumeMap);
if (matchingExportMaskURIs.isEmpty()) {
previousStep = checkForSnapshotsToCopyToTarget(workflow, storage, previousStep, volumeMap, null);
_log.info(String.format("No existing mask found w/ initiators { %s }", Joiner.on(",").join(portNames)));
createNewExportMaskWorkflowForInitiators(initiatorURIs, exportGroup, workflow, volumeMap, storage, token, previousStep);
flowCreated = true;
} else {
_log.info(String.format("Mask(s) found w/ initiators {%s}. " + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}", Joiner.on(",").join(portNames), Joiner.on(",").join(matchingExportMaskURIs.keySet()), Joiner.on(",").join(portNameToInitiatorURI.entrySet())));
// There are some initiators that already exist. We need to create a
// workflow that create new masking containers or updates masking
// containers as necessary.
// These data structures will be used to track new initiators - ones
// that don't already exist on the array
List<URI> initiatorURIsCopy = new ArrayList<URI>();
initiatorURIsCopy.addAll(initiatorURIs);
// This loop will determine a list of volumes to update per export mask
Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
// Keep track of those initiators that have been found to exist already
// in some export mask on the array
initiatorURIsCopy.remove(initiatorURI);
// Get a list of the ExportMasks that were matched to the initiator
List<URI> exportMaskURIs = new ArrayList<URI>();
exportMaskURIs.addAll(entry.getValue());
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
_log.info(String.format("initiator %s masks {%s}", initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
for (ExportMask mask : masks) {
_log.info(String.format("mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
getDevice().refreshExportMask(storage, mask);
// Check for NO_VIPR. If found, avoid this mask.
if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
_log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
continue;
}
if (mask.getCreatedBySystem()) {
_log.info(String.format("initiator %s is in persisted mask %s", initiator.getInitiatorPort(), mask.getMaskName()));
// We are still OK if the mask contains ONLY initiators that can be found in our export group, because we would simply add to them.
if (mask.getInitiators() != null) {
for (String existingMaskInitiatorStr : mask.getInitiators()) {
// Now look at it from a different angle: which of our export group initiators
// are NOT in the current mask? If such an initiator belongs to the same host
// as an existing one, we should add it to this mask.
Iterator<URI> initiatorIter = initiatorURIsCopy.iterator();
while (initiatorIter.hasNext()) {
Initiator initiatorCopy = _dbClient.queryObject(Initiator.class, initiatorIter.next());
if (initiatorCopy != null && initiatorCopy.getId() != null && !mask.hasInitiator(initiatorCopy.getId().toString())) {
Initiator existingMaskInitiator = _dbClient.queryObject(Initiator.class, URI.create(existingMaskInitiatorStr));
if (existingMaskInitiator != null && initiatorCopy.getHost() != null && initiatorCopy.getHost().equals(existingMaskInitiator.getHost())) {
// Add to the list of initiators we need to add to this mask
Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
if (existingMaskInitiators == null) {
existingMaskInitiators = new HashSet<Initiator>();
existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
}
existingMaskInitiators.add(initiatorCopy);
// remove this from the list of initiators we'll make a new mask from
initiatorIter.remove();
}
}
}
}
}
} else {
// Insert this initiator into the mask's list of initiators managed by the system.
// This will get persisted below.
mask.addInitiator(initiator);
if (!NullColumnValueGetter.isNullURI(initiator.getHost())) {
hostToExistingExportMaskMap.put(initiator.getHost(), mask.getId());
}
}
// We need to see if the volume also exists in the mask; if it doesn't, then we'll add it to the list of volumes to add.
for (URI boURI : volumeURIs) {
BlockObject bo = Volume.fetchExportMaskBlockObject(_dbClient, boURI);
if (bo != null && !mask.hasExistingVolume(bo)) {
_log.info(String.format("volume %s is not in mask %s", bo.getWWN(), mask.getMaskName()));
// The volume doesn't exist, so we have to add it to
// the masking container.
Map<URI, Integer> newVolumes = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (newVolumes == null) {
newVolumes = new HashMap<URI, Integer>();
existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumes);
}
// Check if the requested HLU for the volume is
// already taken by a pre-existing volume.
Integer requestedHLU = volumeMap.get(boURI);
StringMap existingVolumesInMask = mask.getExistingVolumes();
if (existingVolumesInMask != null && requestedHLU.intValue() != ExportGroup.LUN_UNASSIGNED && !ExportGroup.LUN_UNASSIGNED_DECIMAL_STR.equals(requestedHLU.toString()) && existingVolumesInMask.containsValue(requestedHLU.toString())) {
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
ServiceError serviceError = DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(boURI.toString(), requestedHLU.toString());
completer.error(_dbClient, serviceError);
return false;
}
newVolumes.put(bo.getId(), requestedHLU);
mask.addToUserCreatedVolumes(bo);
} else if (bo != null && mask.hasExistingVolume(bo)) {
_log.info(String.format("volume %s is already in mask %s. Removing it from mask's existing volumes and adding to user created volumes", bo.getWWN(), mask.getMaskName()));
String hlu = mask.getExistingVolumes().get(BlockObject.normalizeWWN(bo.getWWN()));
mask.removeFromExistingVolumes(bo);
mask.addVolume(bo.getId(), Integer.parseInt(hlu));
mask.addToUserCreatedVolumes(bo);
}
}
// Update the list of volumes and initiators for the mask
Map<URI, Integer> volumeMapForExistingMask = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
mask.addVolumes(volumeMapForExistingMask);
}
Set<Initiator> initiatorSetForExistingMask = existingMasksToUpdateWithNewInitiators.get(mask.getId());
if (initiatorSetForExistingMask != null && !initiatorSetForExistingMask.isEmpty()) {
mask.addInitiators(initiatorSetForExistingMask);
}
updateZoningMap(exportGroup, mask);
_dbClient.updateAndReindexObject(mask);
// TODO: All export group modifications should be moved to completers
exportGroup.addExportMask(mask.getId());
_dbClient.updateAndReindexObject(exportGroup);
}
}
// The initiatorURIsCopy was used in the foreach initiator loop to see
// which initiators already exist in a mask. If it is non-empty,
// then it means there are initiators that are new,
// so let's add them to the main tracker
Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
if (!initiatorURIsCopy.isEmpty()) {
for (URI newExportMaskInitiator : initiatorURIsCopy) {
Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
if (initiatorSet == null) {
initiatorSet = new ArrayList<URI>();
hostInitiatorMap.put(initiator.getHost(), initiatorSet);
}
initiatorSet.add(initiator.getId());
_log.info(String.format("host = %s, " + "initiators to add: %d, " + "existingMasksToUpdateWithNewVolumes.size = %d", initiator.getHost(), hostInitiatorMap.get(initiator.getHost()).size(), existingMasksToUpdateWithNewVolumes.size()));
}
}
_log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d", existingMasksToUpdateWithNewVolumes.size()));
previousStep = checkForSnapshotsToCopyToTarget(workflow, storage, previousStep, volumeMap, existingMasksToUpdateWithNewVolumes.values());
// At this point we can create new masks for hosts whose initiators weren't found and/or add volumes to existing masks.
if (!hostInitiatorMap.isEmpty()) {
for (URI hostID : hostInitiatorMap.keySet()) {
// If an existing mask already covers this host, add the new initiators associated with that host to the list for that mask.
if (hostToExistingExportMaskMap.containsKey(hostID)) {
URI existingExportMaskURI = hostToExistingExportMaskMap.get(hostID);
Set<Initiator> toAddInits = new HashSet<Initiator>();
List<URI> hostInitiatorList = hostInitiatorMap.get(hostID);
for (URI initURI : hostInitiatorList) {
Initiator initiator = _dbClient.queryObject(Initiator.class, initURI);
if (!initiator.getInactive()) {
toAddInits.add(initiator);
}
}
_log.info(String.format("Need to add new initiators to existing mask %s, %s", existingExportMaskURI.toString(), Joiner.on(',').join(hostInitaitorList)));
existingMasksToUpdateWithNewInitiators.put(existingExportMaskURI, toAddInits);
continue;
}
// We have some brand new initiators, let's add them to new masks
_log.info(String.format("new export masks %s", Joiner.on(",").join(hostInitiatorMap.get(hostID))));
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, hostInitiatorMap.get(hostID), volumeMap, token);
previousStep = result.getStepId();
flowCreated = true;
}
}
for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
Map<URI, Integer> volumesToAdd = entry.getValue();
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, mask, volumesToAdd, null);
flowCreated = true;
}
for (Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
Set<Initiator> initiatorsToAdd = entry.getValue();
List<URI> initiatorsURIs = new ArrayList<URI>();
for (Initiator initiator : initiatorsToAdd) {
initiatorsURIs.add(initiator.getId());
}
_log.info(String.format("adding these initiators %s to mask %s", Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
// To make the right pathing assignments, send down the volumes we are going to add to this mask, if
// available.
previousStep = generateExportMaskAddInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, existingMasksToUpdateWithNewVolumes.get(entry.getKey()) != null ? existingMasksToUpdateWithNewVolumes.get(entry.getKey()).keySet() : null, token);
flowCreated = true;
}
}
return flowCreated;
}
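Both the VMAX rule method and the VNX method above inline the same HLU-conflict test before adding a volume to a mask. The sketch below distills that test into a standalone predicate, using only calls visible in the snippets; the helper name requestedHLUConflicts is hypothetical.

// Hypothetical helper distilling the inline HLU-conflict check above: an
// explicitly requested HLU collides when a pre-existing volume in the mask
// (the mask's WWN -> HLU string map) already occupies that HLU.
private boolean requestedHLUConflicts(ExportMask mask, Integer requestedHLU) {
    StringMap existingVolumesInMask = mask.getExistingVolumes();
    return existingVolumesInMask != null
            && requestedHLU.intValue() != ExportGroup.LUN_UNASSIGNED
            && !ExportGroup.LUN_UNASSIGNED_DECIMAL_STR.equals(requestedHLU.toString())
            && existingVolumesInMask.containsValue(requestedHLU.toString());
}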
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class XIVMaskingOrchestrator method exportGroupAddInitiators.
@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
BlockStorageDevice device = getDevice();
ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddInitiators", true, token);
Map<URI, List<URI>> zoneMasksToInitiatorsURIs = new HashMap<URI, List<URI>>();
Map<URI, Map<URI, Integer>> zoneNewMasksToVolumeMap = new HashMap<URI, Map<URI, Integer>>();
List<URI> hostURIs = new ArrayList<URI>();
Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
List<String> portNames = new ArrayList<String>();
// Populate the port WWN/IQNs (portNames) and the
// mapping of the WWN/IQNs to Initiator URIs
processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
// Populate a map of volumes on the storage device
List<BlockObject> blockObjects = new ArrayList<BlockObject>();
Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
if (exportGroup.getVolumes() != null) {
for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
URI boURI = URI.create(entry.getKey());
Integer hlu = Integer.valueOf(entry.getValue());
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
if (bo.getStorageController().equals(storageURI)) {
volumeMap.put(boURI, hlu);
blockObjects.add(bo);
}
}
}
// We always want to have the full list of initiators for the hosts involved in
// this export. This will allow the export operation to always find any
// existing exports for a given host.
queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);
boolean anyOperationsToDo = false;
Map<String, Set<URI>> matchingExportMaskURIs = device.findExportMasks(storage, portNames, false);
// We need to maintain separate export masks for cluster and host,
// so remove any export masks that do not match this export group.
filterExportMaskForGroup(exportGroup, matchingExportMaskURIs);
if (!matchingExportMaskURIs.isEmpty()) {
// There were some exports out there that already have some or all of the
// initiators that we are attempting to add. We need to only add
// volumes to those existing exports.
List<URI> initiatorURIsCopy = new ArrayList<URI>();
initiatorURIsCopy.addAll(initiatorURIs);
// This loop will determine a list of volumes to update per export mask
Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
initiatorURIsCopy.remove(initiatorURI);
// Get a list of the ExportMasks that were matched to the initiator
List<URI> exportMaskURIs = new ArrayList<URI>();
exportMaskURIs.addAll(entry.getValue());
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
_log.info(String.format("initiator %s is in these masks {%s}", initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
for (ExportMask mask : masks) {
// Check for NO_VIPR. If found, avoid this mask.
if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
_log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
continue;
}
_log.info(String.format("mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
if (!mask.getInactive() && mask.getStorageDevice().equals(storageURI)) {
// Add any volumes that aren't already in the mask to the placement list.
for (BlockObject blockObject : blockObjects) {
if (!mask.hasExistingVolume(blockObject.getWWN()) && !mask.hasUserAddedVolume(blockObject.getWWN())) {
Map<URI, Integer> newVolumesMap = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (newVolumesMap == null) {
newVolumesMap = new HashMap<URI, Integer>();
existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumesMap);
}
newVolumesMap.put(blockObject.getId(), volumeMap.get(blockObject.getId()));
}
}
if (mask.getCreatedBySystem()) {
// We are still OK if the mask contains ONLY initiators that can be found in our export group, because we would simply add to them.
if (mask.getInitiators() != null) {
for (String existingMaskInitiatorStr : mask.getInitiators()) {
// Now look at it from a different angle: which of our export group initiators
// are NOT in the current mask? If such an initiator belongs to the same host as an existing one,
// we should add it to this mask.
Iterator<URI> initiatorIter = initiatorURIsCopy.iterator();
while (initiatorIter.hasNext()) {
Initiator initiatorCopy = _dbClient.queryObject(Initiator.class, initiatorIter.next());
if (!mask.hasInitiator(initiatorCopy.getId().toString())) {
Initiator existingMaskInitiator = _dbClient.queryObject(Initiator.class, URI.create(existingMaskInitiatorStr));
if (initiatorCopy.getHost().equals(existingMaskInitiator.getHost())) {
// Add to the list of initiators we need to add to this mask
Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
if (existingMaskInitiators == null) {
existingMaskInitiators = new HashSet<Initiator>();
existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
}
if (!existingMaskInitiators.contains(initiatorCopy)) {
existingMaskInitiators.add(initiatorCopy);
}
// remove this from the list of initiators we'll make a new mask from
initiatorIter.remove();
}
}
}
}
}
} else {
// Insert this initiator into the mask's list of initiators managed by the system.
// This will get persisted below.
mask.addInitiator(initiator);
}
}
// Update the list of volumes and initiators for the mask
Map<URI, Integer> volumeMapForExistingMask = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
mask.addVolumes(volumeMapForExistingMask);
}
Set<Initiator> initiatorSetForExistingMask = existingMasksToUpdateWithNewInitiators.get(mask.getId());
if (initiatorSetForExistingMask != null && !initiatorSetForExistingMask.isEmpty()) {
mask.addInitiators(initiatorSetForExistingMask);
}
updateZoningMap(exportGroup, mask);
_dbClient.updateAndReindexObject(mask);
// TODO: All export group modifications should be moved to completers
exportGroup.addExportMask(mask.getId());
_dbClient.updateAndReindexObject(exportGroup);
}
}
// The initiatorURIsCopy was used in the foreach initiator loop to see
// which initiators already exist in a mask. If it is non-empty,
// then it means there are initiators that are new,
// so let's add them to the main tracker
Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
if (!initiatorURIsCopy.isEmpty()) {
for (URI newExportMaskInitiator : initiatorURIsCopy) {
Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
if (initiatorSet == null) {
initiatorSet = new ArrayList<URI>();
hostInitiatorMap.put(initiator.getHost(), initiatorSet);
}
initiatorSet.add(initiator.getId());
_log.info(String.format("host = %s, " + "initiators to add: %d, " + "existingMasksToUpdateWithNewVolumes.size = %d", initiator.getHost(), hostInitiatorMap.get(initiator.getHost()).size(), existingMasksToUpdateWithNewVolumes.size()));
}
}
for (URI host : hostInitiatorMap.keySet()) {
// Create two steps, one for Zoning, one for the ExportGroup actions.
// This step is for zoning. It is not specific to a single NetworkSystem,
// as it will look at all the initiators and targets and compute the
// zones required (which might be on multiple NetworkSystems.)
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, hostInitiatorMap.get(host), volumeMap, token);
zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
anyOperationsToDo = true;
}
_log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d", existingMasksToUpdateWithNewVolumes.size()));
String attachGroupSnapshot = checkForSnapshotsToCopyToTarget(workflow, storage, null, volumeMap, existingMasksToUpdateWithNewVolumes.values());
// At this point we have a mapping of all the masks that we need to update with new volumes
// stepMap: export mask URI => step ID of the preceding add-volumes workflow step
Map<URI, String> stepMap = new HashMap<URI, String>();
for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
Map<URI, Integer> volumesToAdd = entry.getValue();
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
List<URI> volumeURIs = new ArrayList<URI>();
volumeURIs.addAll(volumesToAdd.keySet());
stepMap.put(entry.getKey(), generateDeviceSpecificAddVolumeWorkFlow(workflow, attachGroupSnapshot, storage, exportGroup, mask, volumesToAdd, volumeURIs, null));
anyOperationsToDo = true;
}
// At this point we have a mapping of all the masks that we need to update with new initiators
for (Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
Set<Initiator> initiatorsToAdd = entry.getValue();
List<URI> initiatorsURIs = new ArrayList<URI>();
for (Initiator initiator : initiatorsToAdd) {
initiatorsURIs.add(initiator.getId());
}
_log.info(String.format("adding these initiators %s to mask %s", Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
String previousStep = attachGroupSnapshot;
if (stepMap.get(entry.getKey()) != null) {
previousStep = stepMap.get(entry.getKey());
}
Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
maskToInitiatorsMap.put(mask.getId(), initiatorURIs);
generateDeviceSpecificAddInitiatorWorkFlow(workflow, previousStep, storage, exportGroup, mask, Arrays.asList(), initiatorsURIs, maskToInitiatorsMap, token);
anyOperationsToDo = true;
}
} else {
// None of the initiators that we're trying to add exist on the
// array in some export. We need to find the ExportMask that was created by
// the system and add the new initiator(s) to it.
boolean foundASystemCreatedMask = false;
Map<String, List<URI>> hostInitiatorMap = new HashMap<String, List<URI>>();
if (!initiatorURIs.isEmpty()) {
for (URI newExportMaskInitiator : initiatorURIs) {
Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
if (initiator != null) {
String hostURIString = initiator.getHost().toString();
List<URI> initiatorSet = hostInitiatorMap.get(hostURIString);
if (initiatorSet == null) {
hostInitiatorMap.put(initiator.getHost().toString(), new ArrayList<URI>());
initiatorSet = hostInitiatorMap.get(hostURIString);
}
initiatorSet.add(initiator.getId());
_log.info(String.format("host = %s, " + "initiators to add: %d, ", initiator.getHost(), hostInitiatorMap.get(hostURIString).size()));
}
}
}
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup);
if (!exportMasks.isEmpty()) {
_log.info("There are export masks for this group. Adding initiators.");
// Add the initiators to those masks on the storage system that were created by Bourne and are still active.
for (ExportMask exportMask : exportMasks) {
if (exportMask != null && !exportMask.getInactive() && exportMask.getStorageDevice().equals(storageURI) && exportMask.getCreatedBySystem()) {
List<URI> newInitiators = hostInitiatorMap.get(exportMask.getResource());
if (newInitiators != null && !newInitiators.isEmpty()) {
zoneMasksToInitiatorsURIs.put(exportMask.getId(), newInitiators);
generateDeviceSpecificExportMaskAddInitiatorsWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, exportMask, null, newInitiators, token);
foundASystemCreatedMask = true;
anyOperationsToDo = true;
}
}
}
}
if (!foundASystemCreatedMask) {
_log.info("There are no masks for this export. Need to create anew.");
for (String host : hostInitiatorMap.keySet()) {
// Zoning is done for the new masks identified i.e. zoneNewMasksToVolumeMap.
GenExportMaskCreateWorkflowResult result = generateDeviceSpecificExportMaskCreateWorkFlow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, hostInitiatorMap.get(host), volumeMap, token);
zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
anyOperationsToDo = true;
}
}
}
if (anyOperationsToDo) {
if (!zoneNewMasksToVolumeMap.isEmpty()) {
List<URI> exportMaskList = new ArrayList<URI>();
exportMaskList.addAll(zoneNewMasksToVolumeMap.keySet());
Map<URI, Integer> overallVolumeMap = new HashMap<URI, Integer>();
for (Map<URI, Integer> oneVolumeMap : zoneNewMasksToVolumeMap.values()) {
overallVolumeMap.putAll(oneVolumeMap);
}
generateDeviceSpecificZoningCreateWorkflow(workflow, null, exportGroup, exportMaskList, overallVolumeMap);
}
if (!zoneMasksToInitiatorsURIs.isEmpty()) {
generateDeviceSpecificZoningAddInitiatorsWorkflow(workflow, null, exportGroup, zoneMasksToInitiatorsURIs);
}
String successMessage = String.format("Successfully exported to initiators on StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
taskCompleter.ready(_dbClient);
}
}
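This method builds a host-to-initiators map twice, and the VNX snippet above builds the same shape. The sketch below captures that grouping idiom as a reusable helper, assuming the same Initiator and DbClient types; the helper name groupInitiatorsByHost and the null-host guard are additions.

// Hypothetical helper capturing the repeated grouping idiom: bucket initiator
// URIs by the host they belong to, skipping initiators with no host set.
private Map<URI, List<URI>> groupInitiatorsByHost(List<URI> initiatorURIs) {
    Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
    for (URI initiatorURI : initiatorURIs) {
        Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
        if (initiator != null && !NullColumnValueGetter.isNullURI(initiator.getHost())) {
            List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
            if (initiatorSet == null) {
                initiatorSet = new ArrayList<URI>();
                hostInitiatorMap.put(initiator.getHost(), initiatorSet);
            }
            initiatorSet.add(initiator.getId());
        }
    }
    return hostInitiatorMap;
}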
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class XtremIOMaskingOrchestrator method exportGroupRemoveVolumes.
@Override
public void exportGroupRemoveVolumes(URI storageURI, URI exportGroupURI, List<URI> volumes, String token) throws Exception {
ExportTaskCompleter taskCompleter = null;
try {
log.info(String.format("exportRemoveVolume start - Array: %s ExportGroup: %s " + "Volume: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumes)));
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
// CTRL-13080 fix - the specific mask is not really needed; this method has to be called once on every export operation.
if (exportMasks != null && !exportMasks.isEmpty()) {
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveVolumes", true, token);
List<ExportMask> exportMaskstoDelete = new ArrayList<ExportMask>();
List<ExportMask> exportMaskstoRemoveVolume = new ArrayList<ExportMask>();
String previousStep = null;
refreshExportMask(storage, getDevice(), exportMasks.get(0));
for (ExportMask exportMask : exportMasks) {
List<URI> maskVolumes = new ArrayList<URI>();
for (URI egVolumeID : volumes) {
BlockObject bo = Volume.fetchExportMaskBlockObject(_dbClient, egVolumeID);
if (bo != null && exportMask.hasUserCreatedVolume(bo.getId())) {
maskVolumes.add(egVolumeID);
} else {
_log.info(String.format("Export mask %s does not contain system-created volume %s, so it will not be removed from this export mask", exportMask.getId().toString(), egVolumeID.toString()));
}
}
if (!maskVolumes.isEmpty()) {
if (isRemoveAllVolumes(exportMask, volumes)) {
exportMaskstoDelete.add(exportMask);
} else {
exportMaskstoRemoveVolume.add(exportMask);
}
}
}
if (!exportMaskstoRemoveVolume.isEmpty()) {
for (ExportMask exportMask : exportMaskstoRemoveVolume) {
List<URI> initiators = StringSetUtil.stringSetToUriList(exportMask.getInitiators());
previousStep = generateExportMaskRemoveVolumesWorkflow(workflow, previousStep, storage, exportGroup, exportMask, volumes, initiators, null);
}
previousStep = generateZoningRemoveVolumesWorkflow(workflow, previousStep, exportGroup, exportMaskstoRemoveVolume, volumes);
}
if (!exportMaskstoDelete.isEmpty()) {
for (ExportMask exportMask : exportMaskstoDelete) {
List<URI> volumesInMask = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
List<URI> initiators = StringSetUtil.stringSetToUriList(exportMask.getInitiators());
previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup, exportMask, volumesInMask, initiators, null);
}
previousStep = generateZoningDeleteWorkflow(workflow, previousStep, exportGroup, exportMaskstoDelete);
}
// Generate a cleanup step to remove the volumes from the export group.
List<URI> initiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
generateExportGroupRemoveVolumesCleanup(workflow, previousStep, storage, exportGroup, volumes, initiators);
String successMessage = String.format("Volumes successfully unexported from StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
log.info("export_volume_remove: no export (initiator should be empty)");
exportGroup.removeVolumes(volumes);
_dbClient.persistObject(exportGroup);
taskCompleter.ready(_dbClient);
}
log.info(String.format("exportRemoveVolume end - Array: %s ExportGroup: %s " + "Volume: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumes)));
} catch (Exception e) {
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
taskCompleter.error(_dbClient, serviceError);
} else {
throw DeviceControllerException.exceptions.exportRemoveVolumes(e);
}
}
}
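The mask partitioning above hinges on isRemoveAllVolumes, whose body is not shown here. Below is a plausible sketch, assuming the predicate compares the removal set against the mask's user-added volumes (as the delete branch's use of ExportMaskUtils.getUserAddedVolumeURIs suggests); the real implementation lives elsewhere in XtremIOMaskingOrchestrator and may differ.

// Hedged sketch of the predicate: the mask becomes a delete candidate when the
// volumes being removed cover every volume ViPR added to it.
private boolean isRemoveAllVolumes(ExportMask exportMask, List<URI> volumesToRemove) {
    List<URI> userAddedVolumes = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
    return !userAddedVolumes.isEmpty() && volumesToRemove.containsAll(userAddedVolumes);
}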
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class XtremIOMaskingOrchestrator method exportGroupRemoveInitiators.
@Override
public void exportGroupRemoveInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
ExportTaskCompleter taskCompleter = null;
try {
log.info(String.format("exportRemoveInitiator start - Array: %s " + "ExportGroup: %s Initiator: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
log.info("Host to initiators : {}", Joiner.on(",").join(computeResourceToInitiators.entrySet()));
refreshExportMask(storage, getDevice(), null);
if (exportMasks != null && !exportMasks.isEmpty()) {
// find the export mask which has the same Host name as the initiator
// Add the initiator to that export mask
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveInitiators", true, token);
// irrespective of cluster name, the host will always be present
Map<String, URI> hostToEMaskGroup = ExportMaskUtils.mapHostToExportMask(_dbClient, exportGroup, storage.getId());
log.info("Host to ExportMask : {}", Joiner.on(",").join(hostToEMaskGroup.entrySet()));
// if export masks are found for the Host, then remove initiators from the export mask
// Export Masks are not shared between export Groups
// list of export masks from which initiators need to be removed
List<ExportMask> exportMaskRemoveInitiator = new ArrayList<ExportMask>();
// list of export masks to delete as all initiators are removed
List<ExportMask> exportMaskDelete = new ArrayList<ExportMask>();
// map of masks to initiators being removed needed to remove zones
Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
String previousStep = null;
for (String computeKey : computeResourceToInitiators.keySet()) {
URI exportMaskUri = hostToEMaskGroup.get(computeKey);
if (null != exportMaskUri) {
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskUri);
if (exportMask.getStorageDevice().equals(storageURI)) {
List<Initiator> initiators = _dbClient.queryObject(Initiator.class, computeResourceToInitiators.get(computeKey));
List<Initiator> maskInitiators = new ArrayList<Initiator>();
for (Initiator initiator : initiators) {
if (exportMask.hasUserInitiator(initiator.getId())) {
maskInitiators.add(initiator);
} else {
_log.info(String.format("Initiator %s was not added by ViPR, so ViPR cannot remove it. No action will be taken for this initiator", initiator.getId()));
}
}
log.info("Processing export mask {} with initiators {}", storageURI, Joiner.on(",").join(maskInitiators));
if (!maskInitiators.isEmpty()) {
maskToInitiatorsMap.put(exportMask.getId(), Lists.newArrayList(Collections2.transform(maskInitiators, CommonTransformerFunctions.fctnDataObjectToID())));
if (isRemoveAllInitiators(exportMask, initiators)) {
exportMaskDelete.add(exportMask);
} else {
exportMaskRemoveInitiator.add(exportMask);
}
}
}
}
}
if (!exportMaskRemoveInitiator.isEmpty()) {
for (ExportMask exportMask : exportMaskRemoveInitiator) {
Collection<URI> volumeURIs = (Collections2.transform(exportMask.getUserAddedVolumes().values(), CommonTransformerFunctions.FCTN_STRING_TO_URI));
previousStep = generateExportMaskRemoveInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, exportMask, new ArrayList<URI>(volumeURIs), initiatorURIs, true);
}
}
if (!exportMaskDelete.isEmpty()) {
for (ExportMask exportMask : exportMaskDelete) {
List<URI> volumesInMask = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
List<URI> initiators = maskToInitiatorsMap.get(exportMask.getId());
previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup, exportMask, volumesInMask, initiators, null);
}
}
if (!maskToInitiatorsMap.isEmpty()) {
previousStep = generateZoningRemoveInitiatorsWorkflow(workflow, previousStep, exportGroup, maskToInitiatorsMap);
}
String successMessage = String.format("Initiators successfully removed from export StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
log.info(String.format("exportRemoveInitiator end - Array: %s ExportGroup: " + "%s Initiator: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception e) {
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
taskCompleter.error(_dbClient, serviceError);
} else {
throw DeviceControllerException.exceptions.exportGroupRemoveInitiatorsFailed(e);
}
}
}
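Both XtremIO methods end with the same catch-block idiom: fail the task through the completer when one exists, otherwise rethrow as a controller exception. The same logic is extracted into a helper below for illustration; the name failTaskOrRethrow is hypothetical, and only calls visible in the snippets are used.

// Hypothetical helper for the shared error idiom: prefer failing the task via
// the completer; fall back to throwing when no completer was created yet.
private void failTaskOrRethrow(ExportTaskCompleter taskCompleter, Exception e) throws Exception {
    if (taskCompleter != null) {
        ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
        taskCompleter.error(_dbClient, serviceError);
    } else {
        throw DeviceControllerException.exceptions.exportGroupRemoveInitiatorsFailed(e);
    }
}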