use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method exportGroupChangePolicyAndLimits.
@Override
public void exportGroupChangePolicyAndLimits(URI storageURI, URI exportMaskURI, URI exportGroupURI,
        List<URI> volumeURIs, URI newVpoolURI, boolean rollback, String token) throws Exception {
ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
VirtualPool newVpool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
BlockStorageDevice device = getDevice();
device.updatePolicyAndLimits(storage, exportMask, volumeURIs, newVpool, rollback, taskCompleter);
}
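The method is a thin delegation: all array-specific work happens behind the BlockStorageDevice interface. A minimal sketch of the contract being invoked, inferred from the call above (the parameter names and the TaskCompleter supertype are assumptions, not the verified CoprHD declaration):
// Sketch only - signature inferred from the call site above, not copied from the source.
public interface BlockStorageDevice {
    // Applies the new virtual pool's auto-tiering policy and host IO limits to the given
    // volumes in the export mask; rollback marks whether this is an undo pass.
    void updatePolicyAndLimits(StorageSystem storage, ExportMask exportMask, List<URI> volumeURIs,
            VirtualPool newVpool, boolean rollback, TaskCompleter taskCompleter);
}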
use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method determineExportGroupCreateSteps.
/**
* Routine contains logic to create an export mask on the array
*
* @param workflow - Workflow object to create steps against
* @param previousStep - [optional] Identifier of workflow step to wait for
* @param device - BlockStorageDevice implementation
* @param storage - StorageSystem object representing the underlying array
* @param exportGroup - ExportGroup object representing Bourne-level masking
* @param initiatorURIs - List of Initiator URIs
* @param volumeMap - Map of Volume URIs to requested Integer HLUs
* @param zoningStepNeeded - Determines whether zone step is needed
* @param token - Identifier for the operation
* @throws Exception
*/
@Override
public boolean determineExportGroupCreateSteps(Workflow workflow, String previousStep, BlockStorageDevice device,
        StorageSystem storage, ExportGroup exportGroup, List<URI> initiatorURIs, Map<URI, Integer> volumeMap,
        boolean zoningStepNeeded, String token) throws Exception {
// If we don't create any workflow steps by the end of this method, we close out the task
// appropriately (instead of the Task just hanging)
boolean flowCreated = false;
InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
// Find the qualifying export masks that are associated with any or all of the ports in
// portNames. We will have to do the processing differently based on whether
// or not there are existing ExportMasks.
//
// In the case of clusters, we try to find the export mask that contains a subset of initiators
// of the cluster, so we can build onto it.
Set<URI> partialMasks = new HashSet<>();
/**
 * For cluster exports, we will not reuse any partial masking views. A masking view will only be reused if all the required cluster
 * initiators are available in the existing masking view. This is to simplify the existing design.
 * - If there are existing masking views with only a partial set of the required initiators, we will not reuse them.
 * - If there are masking views with all the required cluster initiators, we will reuse them.
 * - If there is a masking view that has more than one host in the cluster, but not all the hosts are part of it, don't reuse it.
 * - If there are existing masking views with all the hosts in the cluster, but some of the hosts don't contain all the
 * ViPR-discovered initiators, don't reuse them. Always try to create a new masking view for the cluster instead.
 *
 * Note that we consider only the host or cluster initiators connected to the network to be part of the given masking view.
 * If ViPR discovered X initiators in the cluster and only X-n are connected to the network,
 * then we look for a masking view with X-n initiators, not X. Later, during export, the remaining initiators will be added to the IG.
 * The existing IG can be one single IG with more than one host, or it could be an IG per host with missing initiators.
 * If X initiators are already available in the view, then we try to create a new masking view by reusing the IG.
 * During reuse, if the masking view creation fails with an "Initiator-port is already available" error, the user has to modify the
 * existing initiator group.
 */
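// Example: if ViPR discovered 4 initiators for the cluster but only 3 are connected to the
// network, we match against a masking view containing exactly those 3 initiators; the
// remaining initiator is added to the IG later, during export.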
Map<String, Set<URI>> matchingMasks = device.findExportMasks(storage, initiatorHelper.getPortNames(), exportGroup.forCluster());
Map<String, List<URI>> initiatorToComputeResourceMap = initiatorHelper.getResourceToInitiators();
Map<String, Set<URI>> initiatorToExportMaskPlacementMap = determineInitiatorToExportMaskPlacements(
        exportGroup, storage.getId(), initiatorToComputeResourceMap, matchingMasks,
        initiatorHelper.getPortNameToInitiatorURI(), partialMasks);
/**
* COP-28674: During Vblock boot volume export, if existing masking views are found, then check for existing volumes.
* If any are found, throw an exception. This condition applies only to Vblock boot volume exports.
*/
if (exportGroup.forHost() && ExportMaskUtils.isVblockHost(initiatorURIs, _dbClient) && ExportMaskUtils.isBootVolume(_dbClient, volumeMap)) {
_log.info("VBlock boot volume Export: Validating the storage system {} to find existing masking views", storage.getNativeGuid());
if (CollectionUtils.isEmpty(matchingMasks)) {
_log.info("No existing masking views found, passed validation..");
} else {
List<String> maskNames = new ArrayList<String>();
for (Entry<String, Set<URI>> maskEntry : matchingMasks.entrySet()) {
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, maskEntry.getValue());
if (!CollectionUtils.isEmpty(masks)) {
for (ExportMask mask : masks) {
maskNames.add(mask.getMaskName());
}
}
}
Set<String> computeResourceSet = initiatorToComputeResourceMap.keySet();
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
ServiceError serviceError = DeviceControllerException.errors.existingMaskFoundDuringBootVolumeExport(
        Joiner.on(",").join(maskNames), computeResourceSet.iterator().next());
completer.error(_dbClient, serviceError);
return false;
}
} else {
_log.info("VBlock Boot volume Export Validation : Skipping");
}
/**
 * To support multiple exports of VMAX3 volumes with a Host IO Limit, the same Storage Group and
 * Port Group should be used to create a new masking view. Reusing the same storage group would
 * lead to problems, as it could contain additional volumes. Also, reusing a child storage group
 * from one masking view in another masking view is not supported.
 */
if (storage.checkIfVmax3() && ExportUtils.checkIfvPoolHasHostIOLimitSet(_dbClient, volumeMap)) {
_log.info("Volumes have Host IO Limit set in virtual pools. Validating for multiple export..");
Map<String, List<URI>> storageGroupToVolumes = getDevice().groupVolumesByStorageGroupWithHostIOLimit(storage, volumeMap.keySet());
if (!storageGroupToVolumes.isEmpty()) {
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
ServiceError serviceError = DeviceControllerException.errors.cannotMultiExportVolumesWithHostIOLimit(
        Joiner.on(",").join(storageGroupToVolumes.keySet()),
        Joiner.on(",").join(storageGroupToVolumes.values()));
completer.error(_dbClient, serviceError);
return false;
}
}
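// For example, a non-empty result such as {"SG_with_IO_limit": [vol-A, vol-B]} (names
// illustrative) means those volumes already sit in a storage group carrying a host IO
// limit, so exporting them through another masking view is rejected here.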
findAndUpdateFreeHLUsForClusterExport(storage, exportGroup, initiatorURIs, volumeMap);
// If no existing export masks were found for these initiators, create brand new masks for them.
if (initiatorToExportMaskPlacementMap.isEmpty()) {
_log.info(String.format("No existing mask found w/ initiators { %s }", Joiner.on(",").join(initiatorHelper.getPortNames())));
if (!initiatorURIs.isEmpty()) {
Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
String computeKey = resourceEntry.getKey();
List<URI> computeInitiatorURIs = resourceEntry.getValue();
_log.info(String.format("New export masks for %s", computeKey));
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
previousStep = result.getStepId();
flowCreated = true;
}
}
} else {
Map<URI, ExportMaskPolicy> policyCache = new HashMap<>();
_log.info(String.format("Mask(s) found w/ initiators {%s}. " + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}", Joiner.on(",").join(initiatorHelper.getPortNames()), Joiner.on(",").join(initiatorToExportMaskPlacementMap.values()), Joiner.on(",").join(initiatorHelper.getPortNameToInitiatorURI().entrySet())));
// There are some initiators that already exist. We need to create a
// workflow that creates new masking containers or updates existing
// containers as necessary.
// These data structures will be used to track new initiators - ones
// that don't already exist on the array.
List<URI> initiatorURIsCopy = new ArrayList<URI>();
initiatorURIsCopy.addAll(initiatorURIs);
// This loop will determine a list of volumes to update per export mask
Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
Set<URI> initiatorsForNewExport = new HashSet<>();
// A matched mask may apply to additional initiators of the compute resource that don't
// have placements yet. Find this condition and add the additional ports to the map.
if (exportGroup.forCluster() || exportGroup.forHost()) {
updatePlacementMapForCluster(exportGroup, initiatorHelper.getResourceToInitiators(), initiatorToExportMaskPlacementMap);
}
// Go through the initiators that were placed in existing masks
// to mark that these initiators need to be added to the existing masks.
for (Map.Entry<String, Set<URI>> entry : initiatorToExportMaskPlacementMap.entrySet()) {
URI initiatorURI = initiatorHelper.getPortNameToInitiatorURI().get(entry.getKey());
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
// Keep track of those initiators that have been found to exist already
// in some export mask on the array
initiatorURIsCopy.remove(initiatorURI);
List<URI> exportMaskURIs = new ArrayList<URI>();
exportMaskURIs.addAll(entry.getValue());
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
_log.info(String.format("initiator %s masks {%s}", initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
// Check each matching mask to see if the initiator is not yet added to it. Note the masks were all refreshed by #device.findExportMasks() above
for (ExportMask mask : masks) {
_log.info(String.format("processing mask %s and initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
// Check for NO_VIPR. If found, avoid this mask.
if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
_log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
continue;
}
ExportMaskPolicy exportMaskDetails = getExportMaskPolicy(policyCache, device, storage, mask);
// Check if the ExportMask applies to more than one host. Since
// ViPR creates one ExportMask per compute resource
// (single host or cluster), the only way an existing mask
// applies to multiple hosts is when it was created for a cluster
// export. If we find that to be the case during a host export,
// we should create new ExportMasks for the host instead.
boolean hasMultipleHosts = maskAppliesToMultipleHosts(mask);
boolean createHostExportWhenClusterExportExists = (hasMultipleHosts && exportGroup.forHost());
// One node cluster Case - Always create a new MV if existing mask doesn't contain Cascaded IG.
boolean createClusterExportWhenHostExportExists = (exportGroup.forCluster() && !exportMaskDetails.isCascadedIG());
if (createClusterExportWhenHostExportExists || createHostExportWhenClusterExportExists) {
// It may turn out that we find these initiators already covered by a collection of
// masks for cluster purposes. If that's the case, we figure that out below and these
// "new" exports will never see the light of day.
_log.info("New export mask will be created for initiator {}", initiatorURI);
initiatorsForNewExport.add(initiatorURI);
// remove this mask from policyCache
policyCache.remove(mask.getId());
continue;
}
// Check whether the mask already has initiators from the compute resources
// in our export group, because we would simply add to them.
if (mask.getInitiators() != null) {
for (String existingMaskInitiatorStr : mask.getInitiators()) {
Initiator existingMaskInitiator = _dbClient.queryObject(Initiator.class, URI.create(existingMaskInitiatorStr));
// If the initiator belongs to the same host (host export) or cluster (cluster export)
// as an initiator already in the mask, we should add it to this mask.
if ((initiator != null && initiator.getId() != null)
        // ... and we don't have an entry already to add this initiator to the mask ...
        && (!existingMasksToUpdateWithNewInitiators.containsKey(mask.getId())
                || !existingMasksToUpdateWithNewInitiators.get(mask.getId()).contains(initiator))
        // ... and the initiator exists in the first place ...
        && (existingMaskInitiator != null
                // ... and this is a host export for this host, or ...
                && (exportGroup.forHost() && initiator.getHost() != null && initiator.getHost().equals(existingMaskInitiator.getHost())
                        // ... this is a cluster export for this cluster.
                        || (exportGroup.forCluster() && initiator.getClusterName() != null
                                && initiator.getClusterName().equals(existingMaskInitiator.getClusterName()))))) {
// Add to the list of initiators we need to add to this mask
Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
if (existingMaskInitiators == null) {
existingMaskInitiators = new HashSet<Initiator>();
existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
}
// Only add the initiator if the mask doesn't already contain it; it will be added
// to the export group later.
if (!mask.hasInitiator(initiator.getId().toString())) {
existingMaskInitiators.add(initiator);
_log.info(String.format("initiator %s needs to be added to mask %s", initiator.getInitiatorPort(), mask.getMaskName()));
}
}
}
}
}
}
VmaxVolumeToExportMaskApplicatorContext context = createVmaxNativeApplicatorContext(workflow, exportGroup,
        storage, policyCache, zoningStepNeeded, token, initiatorHelper, initiatorToExportMaskPlacementMap,
        initiatorURIsCopy, partialMasks, volumeMap, initiatorsForNewExport, existingMasksToUpdateWithNewVolumes,
        existingMasksToUpdateWithNewInitiators, previousStep);
NativeVolumeToExportMaskRuleApplicator ruleApplicator = new NativeVolumeToExportMaskRuleApplicator(_dbClient, context);
ruleApplicator.run();
if (context.resultSuccess) {
// Set the flags that should have been filled in by NativeVolumeToExportMaskRuleApplicator running
previousStep = context.previousStep;
flowCreated = context.flowCreated;
} else {
_log.info("Failure in volume to ExportMask rules");
return false;
}
_log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d", existingMasksToUpdateWithNewVolumes.size()));
// Create brand new export masks for initiators that were matched to a mask of the wrong
// resource type - a host export that found a multi-host (cluster) mask, or vice-versa.
if (!initiatorsForNewExport.isEmpty()) {
_log.info("Initiators for which new Export Mask will be created: {}", initiatorsForNewExport);
if (exportGroup.forCluster() && !initiatorURIsCopy.isEmpty()) {
// Clustered export group create request and there are essentially
// new and existing initiators. We'll take what's not already
// exported to and add it to the list of initiators to export
initiatorsForNewExport.addAll(initiatorURIsCopy);
// Clear the copy list because we're going to be creating exports
// for these. (There's code below that uses initiatorURIsCopy to
// determine what exports to update)
initiatorURIsCopy.clear();
}
Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorsForNewExport);
for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
String computeKey = resourceEntry.getKey();
List<URI> computeInitiatorURIs = resourceEntry.getValue();
_log.info(String.format("New export masks for %s", computeKey));
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
flowCreated = true;
previousStep = result.getStepId();
if (zoningStepNeeded) {
String zoningStep = workflow.createStepId();
List<URI> masks = new ArrayList<URI>();
masks.add(result.getMaskURI());
previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, masks, volumeMap, zoningStep);
}
}
}
// The initiatorURIsCopy was used in the for each initiator loop to see
// which initiators already exist in a mask. If it is non-empty,
// then it means there are initiators that are new,
// so let's add them to the main tracker
Map<String, List<URI>> newComputeResources = mapInitiatorsToComputeResource(exportGroup, initiatorURIsCopy);
// Create new export masks for these brand new compute resources. (Volumes are added
// to existing masks further below.)
if (newComputeResources != null && !newComputeResources.isEmpty()) {
for (Map.Entry<String, List<URI>> entry : newComputeResources.entrySet()) {
// We have some brand new initiators, let's add them to new masks
_log.info(String.format("New mask needed for compute resource %s", entry.getKey()));
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, entry.getValue(), volumeMap, token);
flowCreated = true;
previousStep = result.getStepId();
// Add zoning step
if (zoningStepNeeded) {
String zoningStep = workflow.createStepId();
List<URI> masks = new ArrayList<URI>();
masks.add(result.getMaskURI());
previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, masks, volumeMap, zoningStep);
}
}
}
// Put volumes in the existing masks that need them.
for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
updateZoningMap(exportGroup, mask, true);
Map<URI, Integer> volumesToAdd = entry.getValue();
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, Arrays.asList(mask), new ArrayList<URI>(volumesToAdd.keySet()));
previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, mask, volumesToAdd, null);
flowCreated = true;
exportGroup.addExportMask(mask.getId());
_dbClient.updateObject(exportGroup);
}
// Put new initiators in existing masks that are missing them.
for (Map.Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
// If we are not adding volumes to this mask, then there is no point in
// modifying it or making it part of our export group.
if (!existingMasksToUpdateWithNewVolumes.containsKey(mask.getId())) {
_log.info(String.format("Not adding initiators to mask: %s because we found we don't need to change the mask", mask.getMaskName()));
continue;
}
updateZoningMap(exportGroup, mask, true);
exportGroup.addExportMask(mask.getId());
_dbClient.updateObject(exportGroup);
Set<Initiator> initiatorsToAdd = entry.getValue();
if (!initiatorsToAdd.isEmpty()) {
List<URI> initiatorsURIs = new ArrayList<URI>();
for (Initiator initiator : initiatorsToAdd) {
initiatorsURIs.add(initiator.getId());
}
_log.info(String.format("adding these initiators %s to mask %s", Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
maskToInitiatorsMap.put(mask.getId(), initiatorsURIs);
previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup, maskToInitiatorsMap);
previousStep = generateExportMaskAddInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, volumeMap.keySet(), token);
flowCreated = true;
}
}
}
// Catch if no flows were created; close off the task
if (!flowCreated) {
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
completer.ready(_dbClient);
return true;
}
return true;
}
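The create-then-zone sequence above appears three times in this method. A condensed sketch of the idiom - the helper name is hypothetical, but the generate*Workflow calls are the ones used above - showing how each returned step id becomes the wait-for anchor of the next step:
private String createMaskAndZone(Workflow workflow, String previousStep, StorageSystem storage,
        ExportGroup exportGroup, List<URI> initiators, Map<URI, Integer> volumeMap,
        boolean zoningStepNeeded, String token) throws Exception {
    // Mask creation always precedes the zoning step that zones it.
    GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(
            workflow, previousStep, storage, exportGroup, initiators, volumeMap, token);
    String lastStep = result.getStepId();
    if (zoningStepNeeded) {
        String zoningStep = workflow.createStepId();
        lastStep = generateZoningCreateWorkflow(workflow, lastStep, exportGroup,
                Collections.singletonList(result.getMaskURI()), volumeMap, zoningStep);
    }
    return lastStep;
}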
use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method applyVolumesToMasksUsingRPVMAXRules.
/**
* This method checks to see if the RP+VMAX best practice rules are followed.
*
* If host information is specified in the ExportGroup (this is the case when the "Create Block volume for host" service catalog option is chosen):
* a) Determine all the masking views corresponding to the compute resource.
* b) Determine, if any, all the RP masking views corresponding to the RP site specified.
* c) Compare the storage ports from the masking view of the compute resource and the RP masking view and see if there is a match.
* If a match is found, then return all the matching RP masking views.
* d) Returns an empty list of masks if there is no RP masking view that matches the given host masking view.
*
* If no compute resource information is specified in the ExportGroup, just returns an empty list of masks.
*
* This method also looks at the existing RP masking views to check whether those masks are intended for JOURNAL volumes only.
* If the ExportGroup is for RP_JOURNAL, then return only the masking views that contain the "journal" keyword in the mask name.
* Returns an empty list if no such masks are found and the ExportGroup specifies RP_JOURNAL.
*
* @param storage Storage system
* @param exportGroup ExportGroup
* @param masksMap Map of exportMask to policy
* @return Map of ExportMask to ExportMaskPolicy; masks matching the above set of rules are returned based on whether the ExportGroup
* specifies RP or RP_JOURNAL in the ExportGroup flags.
*/
private Map<ExportMask, ExportMaskPolicy> applyVolumesToMasksUsingRPVMAXRules(StorageSystem storage,
        ExportGroup exportGroup, Map<ExportMask, ExportMaskPolicy> masksMap) {
Map<ExportMask, ExportMaskPolicy> matchingMaskMap = new HashMap<ExportMask, ExportMaskPolicy>();
final String RECOVERPOINT_JOURNAL = "journal";
// If this is an RP export (non-journal) but there is no host information, return the existing maskMap.
if (exportGroup.checkInternalFlags(Flag.RECOVERPOINT) && !exportGroup.checkInternalFlags(Flag.RECOVERPOINT_JOURNAL)
        && exportGroup.getHosts() == null && exportGroup.getClusters() == null) {
_log.info("ExportGroup doesn't specify any hosts/clusters to which the volumes are exported, follow normal guidelines");
// To follow the normal guidelines, make sure we don't accidentally pick a Journal MV for a non-journal volume
for (Entry<ExportMask, ExportMaskPolicy> maskMap : masksMap.entrySet()) {
ExportMask rpMaskingView = maskMap.getKey();
if (rpMaskingView.getMaskName().toLowerCase().contains(RECOVERPOINT_JOURNAL)) {
_log.info(String.format("Not considering %s for this RP export", rpMaskingView.getMaskName()));
continue;
}
matchingMaskMap.put(maskMap.getKey(), maskMap.getValue());
}
return matchingMaskMap;
}
// Journal masks are identified purely by the "journal" keyword in the mask name; if a
// journal mask doesn't follow that naming convention, we need a way to handle that.
if (exportGroup.checkInternalFlags(Flag.RECOVERPOINT_JOURNAL)) {
_log.info("Looking for masks with JOURNAL keyword since this export group is intended for journal volumes only");
for (Entry<ExportMask, ExportMaskPolicy> maskMap : masksMap.entrySet()) {
ExportMask rpMaskingView = maskMap.getKey();
if (rpMaskingView.getMaskName().toLowerCase().contains(RECOVERPOINT_JOURNAL)) {
matchingMaskMap.put(maskMap.getKey(), maskMap.getValue());
}
}
return matchingMaskMap;
}
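// From here on we handle the non-journal RP case with host/cluster information present:
// fetch the compute resource's masking views, then keep only the RP masks whose storage
// ports line up with them.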
List<String> initiators = getComputeResourceInitiators(exportGroup);
// Fetch all the existing masks for the compute resource
Map<String, Set<URI>> crMaskingViews = getDevice().findExportMasks(storage, initiators, false);
Map<URI, ExportMask> crMaskingViewMap = new HashMap<URI, ExportMask>();
for (Entry<String, Set<URI>> crMaskingViewEntry : crMaskingViews.entrySet()) {
Set<URI> crMaskingView = crMaskingViewEntry.getValue();
for (URI crMaskingViewUri : crMaskingView) {
crMaskingViewMap.put(crMaskingViewUri, _dbClient.queryObject(ExportMask.class, crMaskingViewUri));
}
}
// We need to weed through this list to find only those masking views that are compatible
// with the list of masking views for the compute resource.
for (Entry<ExportMask, ExportMaskPolicy> maskMap : masksMap.entrySet()) {
ExportMask rpMaskingView = maskMap.getKey();
// Ignore RP masks with journal keyword.
if (rpMaskingView.getMaskName().toLowerCase().contains(RECOVERPOINT_JOURNAL)) {
_log.info(String.format("%s is a journal mask, not considering it for RP source/target copy volume", rpMaskingView.getMaskName()));
continue;
}
for (Entry<URI, ExportMask> crMaskingViewMapEntry : crMaskingViewMap.entrySet()) {
ExportMask crMaskingView = crMaskingViewMapEntry.getValue();
// If the storage ports in the compute resource mask contain all the ports in the RP mask, then we have a match.
if (crMaskingView.getStoragePorts().size() >= rpMaskingView.getStoragePorts().size()
        && crMaskingView.getStoragePorts().containsAll(rpMaskingView.getStoragePorts())) {
if (!matchingMaskMap.containsKey(rpMaskingView)) {
_log.info(String.format("Found a RP masking view %s that has the same storage ports as the computer resource (host/cluster) mask %s to which we are exporting the volume. " + "OK to use the RP masking view.", rpMaskingView.getMaskName(), crMaskingView.getMaskName()));
matchingMaskMap.put(rpMaskingView, maskMap.getValue());
}
}
}
}
if (matchingMaskMap.isEmpty()) {
_log.info("No RP masks found that align with to the compute resources' masks");
if (!masksMap.isEmpty()) {
_log.info("There are existing RP masks but none align with the masks for the compute resource. Check to see if they can be re-used");
return masksMap;
} else {
_log.info("No existing masks found for the compute resource, proceed as normal");
}
}
return matchingMaskMap;
}
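The port comparison buried in the nested loops above is the heart of the matching rule. Extracted as a standalone predicate (a hypothetical helper, not in the source; null guards added):
// An RP masking view is compatible when the compute-resource mask exposes at least
// the same storage ports (a superset match, exactly as the inline check above).
private static boolean rpMaskMatchesComputeResourceMask(ExportMask crMask, ExportMask rpMask) {
    return crMask.getStoragePorts() != null && rpMask.getStoragePorts() != null
            && crMask.getStoragePorts().size() >= rpMask.getStoragePorts().size()
            && crMask.getStoragePorts().containsAll(rpMask.getStoragePorts());
}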
use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method exportGroupAddInitiators.
@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
BlockStorageDevice device = getDevice();
String previousStep = null;
ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
logExportGroup(exportGroup, storageURI);
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddInitiators", true, token);
Map<URI, List<URI>> zoneMasksToInitiatorsURIs = new HashMap<URI, List<URI>>();
Map<URI, Map<URI, Integer>> zoneNewMasksToVolumeMap = new HashMap<URI, Map<URI, Integer>>();
Map<URI, ExportMask> refreshedMasks = new HashMap<URI, ExportMask>();
// Populate a map of volumes on the storage device
List<BlockObject> blockObjects = new ArrayList<BlockObject>();
Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
if (exportGroup != null && exportGroup.getVolumes() != null) {
for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
URI boURI = URI.create(entry.getKey());
Integer hlu = Integer.valueOf(entry.getValue());
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
if (bo.getStorageController().equals(storageURI)) {
volumeMap.put(boURI, hlu);
blockObjects.add(bo);
}
}
}
InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
boolean anyOperationsToDo = false;
Set<URI> partialMasks = new HashSet<>();
Map<String, Set<URI>> initiatorToExportMaskPlacementMap = determineInitiatorToExportMaskPlacements(
        exportGroup, storageURI, initiatorHelper.getResourceToInitiators(),
        device.findExportMasks(storage, initiatorHelper.getPortNames(), false),
        initiatorHelper.getPortNameToInitiatorURI(), partialMasks);
if (!initiatorToExportMaskPlacementMap.isEmpty()) {
Map<URI, ExportMaskPolicy> policyCache = new HashMap<>();
// The logic contained here is trying to place the initiators that were passed down in the
// request. If we are in this path where the initiatorToExportMaskPlacementMap is not empty, then there
// are several cases why we got here:
//
// 1). An ExportMask has been found that is associated with the ExportGroup and it
// is supposed to be the container for the compute resources that we are attempting
// to add initiators for.
// 2). An ExportMask has been found that is on the array. It may not be associated with the
// ExportGroup, but it is supposed to be the container for the compute resources that
// we are attempting to add initiators for.
// 3). An ExportMask has been found that is on the array. It may not be associated with the
// ExportGroup, but it has the initiators that we are trying to add
// 4). One of the above possibilities plus an initiator that cannot be placed. The use case here
// would be someone adding a new initiator for an existing host as well as a new host to a cluster export.
List<URI> initiatorsToPlace = new ArrayList<URI>();
initiatorsToPlace.addAll(initiatorURIs);
// This loop will determine a list of volumes to update per export mask
Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
for (Map.Entry<String, Set<URI>> entry : initiatorToExportMaskPlacementMap.entrySet()) {
URI initiatorURI = initiatorHelper.getPortNameToInitiatorURI().get(entry.getKey());
if (initiatorURI == null || exportGroup == null) {
// This initiator does not exist or it is not one of the initiators passed to the function
continue;
}
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
// Get a list of the ExportMasks that were matched to the initiator
List<URI> exportMaskURIs = new ArrayList<URI>();
exportMaskURIs.addAll(entry.getValue());
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
_log.info(String.format("Trying to place initiator %s", entry.getKey()));
for (ExportMask mask : masks) {
// Check for NO_VIPR. If found, avoid this mask.
if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
_log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
continue;
}
_log.info(String.format("Trying to place initiator %s in mask %s", entry.getKey(), mask.getMaskName()));
if (mask.getInactive() && !mask.getStorageDevice().equals(storageURI)) {
continue;
}
// Refresh the mask if it hasn't already been refreshed by
// determineInitiatorToExportMaskPlacements or findExportMasks
if (!refreshedMasks.containsKey(mask.getId())) {
mask = device.refreshExportMask(storage, mask);
refreshedMasks.put(mask.getId(), mask);
}
ExportMaskPolicy policy = getExportMaskPolicy(policyCache, device, storage, mask);
// The mask already has the initiator, but it may not have all of the volumes
// yet. The below logic will add the volumes necessary.
if (mask.hasInitiator(initiatorURI.toString()) && CollectionUtils.isEmpty(ExportUtils.getExportMasksSharingInitiator(_dbClient, initiatorURI, mask, exportMaskURIs))) {
_log.info(String.format("mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
// Add any volumes that are not already in the mask to the placement list
for (BlockObject blockObject : blockObjects) {
// Check whether the mask already knows of the volume by its WWN,
// blockObject properties, and so on.
if (!mask.hasExistingVolume(blockObject.getWWN()) && !mask.hasVolume(blockObject.getId())) {
String volumePolicyName = ControllerUtils.getAutoTieringPolicyName(blockObject.getId(), _dbClient);
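// The volume qualifies only if its auto-tiering policy is compatible with the mask's:
// either both are effectively NONE, or the mask has exactly one tier policy and it
// matches the volume's policy.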
if (((volumePolicyName == null || volumePolicyName.equalsIgnoreCase(Constants.NONE.toString()))
        && (policy.tierPolicies == null || policy.tierPolicies.isEmpty()))
        || (volumePolicyName != null && policy.tierPolicies != null
                && policy.tierPolicies.size() == 1 && policy.tierPolicies.contains(volumePolicyName))) {
_log.info(String.format("mask doesn't have volume %s yet, need to add it", blockObject.getId()));
Map<URI, Integer> newVolumesMap = existingMasksToUpdateWithNewVolumes.get(mask.getId());
if (newVolumesMap == null) {
newVolumesMap = new HashMap<URI, Integer>();
existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumesMap);
}
newVolumesMap.put(blockObject.getId(), volumeMap.get(blockObject.getId()));
}
} else {
_log.info(String.format("not adding volume %s to mask %s", blockObject.getId(), mask.getMaskName()));
}
}
// The initiator has been placed - it is in an already existing export
// for which case, we may just have to add volumes to it
initiatorsToPlace.remove(initiatorURI);
} else {
Set<URI> existingInitiatorIds = ExportMaskUtils.getAllInitiatorsForExportMask(_dbClient, mask);
if (existingInitiatorIds.isEmpty()) {
_log.info(String.format("not adding initiator to %s mask %s because there are no initiators associated with this mask", initiatorURI, mask.getMaskName()));
}
// This mask does not contain the initiator; check whether its existing initiators
// belong to the same compute resource.
for (URI existingInitiatorId : existingInitiatorIds) {
Initiator existingInitiator = _dbClient.queryObject(Initiator.class, existingInitiatorId);
if (existingInitiator == null) {
_log.warn(String.format("Initiator %s was found to be associated with ExportMask %s, but no longer exists in the DB", existingInitiatorId, mask.getId()));
continue;
}
if ((existingInitiator.getHost() != null && existingInitiator.getHost().equals(initiator.getHost())) || (existingInitiator.getClusterName() != null && existingInitiator.getClusterName().equals(initiator.getClusterName()))) {
// Place the initiator in this ExportMask.
if (exportGroup.forCluster() && !policy.isCascadedIG() && ((existingInitiator.getHost() == null || !existingInitiator.getHost().equals(initiator.getHost())))) {
_log.info(String.format("not adding initiator to %s mask %s because it is likely part of another mask in the cluster", initiatorURI, mask.getMaskName()));
continue;
}
Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
if (existingMaskInitiators == null) {
existingMaskInitiators = new HashSet<Initiator>();
existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
}
_log.info(String.format("adding initiator to %s mask %s because it was found to be in the same compute resource", initiatorURI, mask.getMaskName()));
existingMaskInitiators.add(initiator);
// The initiator has been placed - it is not in the export, we will have to
// add it to the mask
initiatorsToPlace.remove(initiatorURI);
} else {
_log.info(String.format("not adding initiator to %s mask %s because it doesn't belong to the same compute resource", existingInitiator.getId(), mask.getMaskName()));
}
}
}
updateZoningMap(exportGroup, mask, true);
}
}
// Any initiators that could not be placed in an existing mask are new,
// so let's create new export masks for them.
if (!initiatorsToPlace.isEmpty()) {
Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorsToPlace);
for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
String computeKey = resourceEntry.getKey();
List<URI> computeInitiatorURIs = resourceEntry.getValue();
_log.info(String.format("New export masks for %s", computeKey));
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
previousStep = result.getStepId();
zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
anyOperationsToDo = true;
}
}
_log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d", existingMasksToUpdateWithNewVolumes.size()));
// At this point we have a mapping of all the masks that we need to update with new volumes
for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
Map<URI, Integer> volumesToAdd = entry.getValue();
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
List<URI> volumeURIs = new ArrayList<URI>();
volumeURIs.addAll(volumesToAdd.keySet());
List<ExportMask> masks = new ArrayList<ExportMask>();
masks.add(mask);
previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, masks, volumeURIs);
previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, mask, volumesToAdd, null);
anyOperationsToDo = true;
}
// At this point we have a mapping of all the masks that we need to update with new initiators
for (Map.Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
Set<Initiator> initiatorsToAdd = entry.getValue();
List<URI> initiatorsURIs = new ArrayList<URI>();
for (Initiator initiator : initiatorsToAdd) {
initiatorsURIs.add(initiator.getId());
}
_log.info(String.format("adding these initiators %s to mask %s", Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
maskToInitiatorsMap.put(mask.getId(), initiatorsURIs);
previousStep = generateExportMaskAddInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, null, token);
previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup, maskToInitiatorsMap);
anyOperationsToDo = true;
}
} else {
_log.info("There are no masks for this export. Need to create anew.");
// Create a new export mask per compute resource, along with the
// zones required (which might be on multiple NetworkSystems.)
for (Map.Entry<String, List<URI>> resourceEntry : initiatorHelper.getResourceToInitiators().entrySet()) {
String computeKey = resourceEntry.getKey();
List<URI> computeInitiatorURIs = resourceEntry.getValue();
_log.info(String.format("New export masks for %s", computeKey));
GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
previousStep = result.getStepId();
anyOperationsToDo = true;
}
}
if (anyOperationsToDo) {
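// All newly created masks are zoned in one zoning step below, using the union of
// their individual volume maps.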
if (!zoneNewMasksToVolumeMap.isEmpty()) {
List<URI> exportMaskList = new ArrayList<URI>();
exportMaskList.addAll(zoneNewMasksToVolumeMap.keySet());
Map<URI, Integer> overallVolumeMap = new HashMap<URI, Integer>();
for (Map<URI, Integer> oneVolumeMap : zoneNewMasksToVolumeMap.values()) {
overallVolumeMap.putAll(oneVolumeMap);
}
previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, exportMaskList, overallVolumeMap);
}
if (!zoneMasksToInitiatorsURIs.isEmpty()) {
previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup, zoneMasksToInitiatorsURIs);
}
String successMessage = String.format("Successfully exported to initiators on StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
taskCompleter.ready(_dbClient);
}
}
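The volume-map population at the top of this method filters the ExportGroup's volumes down to the array being operated on. The same logic as a self-contained sketch (hypothetical helper name; a null guard on the fetched object is added here):
private Map<URI, Integer> volumesOnStorageSystem(ExportGroup exportGroup, URI storageURI) {
    Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
    if (exportGroup.getVolumes() != null) {
        // ExportGroup stores its volumes as URI-string to HLU-string pairs.
        for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
            BlockObject bo = BlockObject.fetch(_dbClient, URI.create(entry.getKey()));
            if (bo != null && bo.getStorageController().equals(storageURI)) {
                volumeMap.put(bo.getId(), Integer.valueOf(entry.getValue()));
            }
        }
    }
    return volumeMap;
}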
use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
the class VmaxVolumeToExportMaskRuleApplicator method createPolicyMap.
/**
* Using the contextual information produce a mapping of ExportMask to its ExportMaskPolicy.
*
* @return Map of ExportMask object to ExportMaskPolicy
*/
private Map<ExportMask, ExportMaskPolicy> createPolicyMap() {
// Translate the ExportMask URI -> ExportMaskPolicy cache into a mapping of ExportMask object to ExportMaskPolicy
Map<ExportMask, ExportMaskPolicy> policyMap = new HashMap<>();
Iterator<ExportMask> exportMasks = dbClient.queryIterativeObjects(ExportMask.class, context.exportMaskURIToPolicy.keySet());
while (exportMasks.hasNext()) {
ExportMask mask = exportMasks.next();
// Check for NO_VIPR. If found, avoid this mask.
if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
continue;
}
ExportMaskPolicy policy = context.exportMaskURIToPolicy.get(mask.getId());
if (policy != null) {
policyMap.put(mask, policy);
} else {
log.error("Could not find an ExportMaskPolicy for {} ({})", mask.getMaskName(), mask.getId());
}
}
return policyMap;
}
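The NO_VIPR name check here repeats verbatim in determineExportGroupCreateSteps and exportGroupAddInitiators above. A shared predicate (hypothetical; the source inlines the check each time) would read:
private static boolean isExcludedByName(ExportMask mask) {
    // Masks whose names contain the NO_VIPR token (in any case) are deliberately
    // kept out of ViPR's mask-matching and reuse logic.
    return mask.getMaskName() != null
            && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR);
}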