
Example 81 with Initiator

use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.

the class VmaxMaskingOrchestrator method changePortGroup.

@Override
public void changePortGroup(URI storageURI, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String token) {
    ExportChangePortGroupCompleter taskCompleter = null;
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
        taskCompleter = new ExportChangePortGroupCompleter(storageURI, exportGroupURI, token, portGroupURI);
        logExportGroup(exportGroup, storageURI);
        String workflowKey = "changePortGroup";
        if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
            return;
        }
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), workflowKey, false, token);
        if (CollectionUtils.isEmpty(exportMaskURIs)) {
            _log.info("No export masks to change");
            taskCompleter.ready(_dbClient);
            return;
        }
        List<ExportMask> exportMasks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
        String previousStep = null;
        Set<URI> hostURIs = new HashSet<URI>();
        for (ExportMask oldMask : exportMasks) {
            // create a new masking view using the new port group
            SmisStorageDevice device = (SmisStorageDevice) getDevice();
            oldMask = device.refreshExportMask(storage, oldMask);
            StringSet existingInits = oldMask.getExistingInitiators();
            StringMap existingVols = oldMask.getExistingVolumes();
            if (!CollectionUtils.isEmpty(existingInits)) {
                String error = String.format("The export mask %s has unmanaged initiators %s", oldMask.getMaskName(), Joiner.on(',').join(existingInits));
                _log.error(error);
                ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
                taskCompleter.error(_dbClient, serviceError);
                return;
            }
            if (!CollectionUtils.isEmpty(existingVols)) {
                String error = String.format("The export mask %s has unmanaged volumes %s", oldMask.getMaskName(), Joiner.on(',').join(existingVols.keySet()));
                _log.error(error);
                ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
                taskCompleter.error(_dbClient, serviceError);
                return;
            }
            InitiatorHelper initiatorHelper = new InitiatorHelper(StringSetUtil.stringSetToUriList(oldMask.getInitiators())).process(exportGroup);
            List<String> initiatorNames = initiatorHelper.getPortNames();
            List<URI> volumes = StringSetUtil.stringSetToUriList(oldMask.getVolumes().keySet());
            ExportPathParams pathParams = _blockScheduler.calculateExportPathParamForVolumes(volumes, 0, storageURI, exportGroupURI);
            pathParams.setStoragePorts(portGroup.getStoragePorts());
            List<Initiator> initiators = ExportUtils.getExportMaskInitiators(oldMask, _dbClient);
            List<URI> initURIs = new ArrayList<URI>();
            for (Initiator init : initiators) {
                if (!NullColumnValueGetter.isNullURI(init.getHost())) {
                    hostURIs.add(init.getHost());
                }
                initURIs.add(init.getId());
            }
            // Get impacted export groups
            List<ExportGroup> impactedExportGroups = ExportMaskUtils.getExportGroups(_dbClient, oldMask);
            List<URI> exportGroupURIs = URIUtil.toUris(impactedExportGroups);
            _log.info("changePortGroup: exportMask {}, impacted export groups: {}", oldMask.getMaskName(), Joiner.on(',').join(exportGroupURIs));
            Map<URI, List<URI>> assignments = _blockScheduler.assignStoragePorts(storage, exportGroup, initiators, null, pathParams, volumes, _networkDeviceController, exportGroup.getVirtualArray(), token);
            // Try to find an existing export mask or masking view for the same host that uses the new
            // port group. If one is found, add the volumes in the current export mask to it; otherwise, create
            // a new export mask/masking view with the same storage group, the initiator group and the new port group,
            // then delete the current export mask.
            ExportMask newMask = device.findExportMasksForPortGroupChange(storage, initiatorNames, portGroupURI);
            Map<URI, Integer> volumesToAdd = StringMapUtil.stringMapToVolumeMap(oldMask.getVolumes());
            if (newMask != null) {
                updateZoningMap(exportGroup, newMask, true);
                _log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), newMask.getMaskName()));
                previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, Arrays.asList(newMask), new ArrayList<URI>(volumesToAdd.keySet()));
                String addVolumeStep = workflow.createStepId();
                ExportTaskCompleter exportTaskCompleter = new ExportMaskAddVolumeCompleter(exportGroupURI, newMask.getId(), volumesToAdd, addVolumeStep);
                exportTaskCompleter.setExportGroups(exportGroupURIs);
                Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportGroupAddVolumes", storageURI, exportGroupURI, newMask.getId(), volumesToAdd, null, exportTaskCompleter);
                Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupAddVolumes", storageURI, exportGroupURI, exportGroupURIs, newMask.getId(), volumesToAdd, initURIs, addVolumeStep);
                previousStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Adding volumes to mask %s (%s)", newMask.getMaskName(), newMask.getId().toString()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, addVolumeStep);
                previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, newMask, volumesToAdd, null);
            } else {
                // No existing export mask/masking view was found, so we will create a new one.
                // First, construct the new export mask name: if the export mask still has its original name,
                // append the new port group name to the current export mask name; if the export mask already has the current
                // port group name appended, then remove the current port group name and append the new one.
                String oldName = oldMask.getMaskName();
                URI oldPGURI = oldMask.getPortGroup();
                if (oldPGURI != null) {
                    StoragePortGroup oldPG = _dbClient.queryObject(StoragePortGroup.class, oldPGURI);
                    if (oldPG != null) {
                        String pgName = oldPG.getLabel();
                        if (oldName.endsWith(pgName)) {
                            oldName = oldName.replaceAll(pgName, "");
                        }
                    }
                }
                String maskName = null;
                if (oldName.endsWith("_")) {
                    maskName = String.format("%s%s", oldName, portGroup.getLabel());
                } else {
                    maskName = String.format("%s_%s", oldName, portGroup.getLabel());
                }
                newMask = ExportMaskUtils.initializeExportMask(storage, exportGroup, initiators, volumesToAdd, getStoragePortsInPaths(assignments), assignments, maskName, _dbClient);
                newMask.setPortGroup(portGroupURI);
                List<BlockObject> vols = new ArrayList<BlockObject>();
                for (URI boURI : volumesToAdd.keySet()) {
                    BlockObject bo = BlockObject.fetch(_dbClient, boURI);
                    vols.add(bo);
                }
                newMask.addToUserCreatedVolumes(vols);
                _dbClient.updateObject(newMask);
                _log.info(String.format("Creating new exportMask %s", maskName));
                // Make a new TaskCompleter for the exportStep. It has only one subtask.
                // This is due to existing requirements in the doExportGroupCreate completion
                // logic.
                String maskingStep = workflow.createStepId();
                ExportTaskCompleter exportTaskCompleter = new ExportMaskChangePortGroupAddMaskCompleter(newMask.getId(), exportGroupURI, maskingStep);
                exportTaskCompleter.setExportGroups(exportGroupURIs);
                Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportChangePortGroupAddPaths", storageURI, exportGroupURI, newMask.getId(), oldMask.getId(), portGroupURI, exportTaskCompleter);
                Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupCreate", storageURI, exportGroupURI, newMask.getId(), maskingStep);
                maskingStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Create export mask(%s) to use port group %s", newMask.getMaskName(), portGroup.getNativeGuid()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, maskingStep);
                String zoningStep = workflow.createStepId();
                List<URI> masks = new ArrayList<URI>();
                masks.add(newMask.getId());
                previousStep = generateZoningCreateWorkflow(workflow, maskingStep, exportGroup, masks, volumesToAdd, zoningStep);
            }
        }
        previousStep = _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
        if (waitForApproval) {
            // Insert a step that will be suspended. When it resumes, it will re-acquire the lock keys,
            // which are released when the workflow suspends.
            List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), storageURI);
            String suspendMessage = "Adjust/rescan host/cluster paths. Press \"Resume\" to start removal of unnecessary paths. " + "\"Rollback\" will terminate the order and roll back.";
            Workflow.Method method = WorkflowService.acquireWorkflowLocksMethod(lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
            Workflow.Method rollbackNull = Workflow.NULL_METHOD;
            previousStep = workflow.createStep("AcquireLocks", "Suspending for user verification of host/cluster connectivity.", previousStep, storage.getId(), storage.getSystemType(), WorkflowService.class, method, rollbackNull, waitForApproval, null);
            workflow.setSuspendedStepMessage(previousStep, suspendMessage);
        }
        for (ExportMask exportMask : exportMasks) {
            previousStep = generateChangePortGroupDeleteMaskWorkflowstep(storageURI, exportGroup, exportMask, previousStep, workflow);
        }
        _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The change port group workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            // update ExportChangePortGroupCompleter with affected export groups
            Set<URI> affectedExportGroups = new HashSet<URI>();
            for (ExportMask mask : exportMasks) {
                List<ExportGroup> assocExportGroups = ExportMaskUtils.getExportGroups(_dbClient, mask);
                for (ExportGroup eg : assocExportGroups) {
                    affectedExportGroups.add(eg.getId());
                }
            }
            taskCompleter.setAffectedExportGroups(affectedExportGroups);
            workflow.executePlan(taskCompleter, "Change port group successfully.");
            _workflowService.markWorkflowBeenCreated(token, workflowKey);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception e) {
        _log.error("Export change port group Orchestration failed.", e);
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : StoragePortGroup(com.emc.storageos.db.client.model.StoragePortGroup) StringMap(com.emc.storageos.db.client.model.StringMap) ExportTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URI(java.net.URI) Initiator(com.emc.storageos.db.client.model.Initiator) WorkflowService(com.emc.storageos.workflow.WorkflowService) StringSet(com.emc.storageos.db.client.model.StringSet) List(java.util.List) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) HashSet(java.util.HashSet) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) ExportMaskChangePortGroupAddMaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportMaskChangePortGroupAddMaskCompleter) Workflow(com.emc.storageos.workflow.Workflow) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ExportChangePortGroupCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportChangePortGroupCompleter) ExportMaskAddVolumeCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportMaskAddVolumeCompleter) SmisStorageDevice(com.emc.storageos.volumecontroller.impl.smis.SmisStorageDevice) ExportPathParams(com.emc.storageos.db.client.model.ExportPathParams)
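
The "create new mask" branch above derives the new masking view name by stripping the old port group label (when it is a suffix of the mask name) and appending the new one. Below is a minimal, self-contained sketch of that naming rule in plain Java; the class and method names are illustrative only, not part of CoprHD. Note that the original uses String.replaceAll(pgName, ""), which treats the label as a regular expression and replaces every occurrence, whereas the sketch strips only a trailing suffix.

// Sketch of the mask renaming rule from the "create new mask" branch above (hypothetical helper).
public class MaskNameSketch {

    // Strip the old port group label if it is a suffix, then append the new label.
    static String newMaskName(String oldMaskName, String oldPgLabel, String newPgLabel) {
        String base = oldMaskName;
        if (oldPgLabel != null && base.endsWith(oldPgLabel)) {
            base = base.substring(0, base.length() - oldPgLabel.length());
        }
        return base.endsWith("_") ? base + newPgLabel : base + "_" + newPgLabel;
    }

    public static void main(String[] args) {
        System.out.println(newMaskName("host1_mv_PG1", "PG1", "PG2")); // prints host1_mv_PG2
        System.out.println(newMaskName("host1_mv", null, "PG2"));      // prints host1_mv_PG2
    }
}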

Example 82 with Initiator

use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.

the class VmaxMaskingOrchestrator method updatePlacementMapForCluster.

/**
 * Special case for VMAX with a cluster compute resource:
 *
 * In the case where a mask may contain a subset of nodes of a cluster, we wish to leverage it.
 *
 * Logic is as follows: attempt to discover which ports have not been placed in the map yet (specific to VMAX),
 * and add those ports to the map when this is a cluster export and the
 * existing mask is already handling multiple hosts.
 *
 * In a brownfield cluster, some of these port-to-ExportMask entries may be missing because the array doesn't
 * have them yet. Detect this condition and add the additional ports to the map.
 *
 * @param exportGroup export group
 * @param resourceToInitiators resource -> initiator list
 * @param initiatorToExportMaskPlacementMap placement mask map from the default orchestrator
 */
private void updatePlacementMapForCluster(ExportGroup exportGroup, Map<String, List<URI>> resourceToInitiators, Map<String, Set<URI>> initiatorToExportMaskPlacementMap) {
    // double check we're dealing with a cluster or host export
    if (exportGroup.forCluster() || exportGroup.forHost()) {
        // Safety, ensure the map has been created.
        if (initiatorToExportMaskPlacementMap == null) {
            initiatorToExportMaskPlacementMap = new HashMap<String, Set<URI>>();
        }
        // Check each compute resource's initiator list
        for (Map.Entry<String, List<URI>> entry : resourceToInitiators.entrySet()) {
            List<URI> initiatorSet = entry.getValue();
            for (URI initiatorURI : initiatorSet) {
                Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
                // Is this initiator covered in the map yet?
                Set<URI> exportMasksToAdd = new HashSet<URI>();
                if (!initiatorToExportMaskPlacementMap.keySet().contains(Initiator.normalizePort(initiator.getInitiatorPort()))) {
                    // Can we find an existing initiatorToExportMaskURIMap entry that contains the same compute resource?
                    for (String port : initiatorToExportMaskPlacementMap.keySet()) {
                        // Verify it's the same compute resource
                        Initiator existingInitiator = ExportUtils.getInitiator(Initiator.toPortNetworkId(port), _dbClient);
                        if (existingInitiator != null && ((exportGroup.forCluster() && existingInitiator.getClusterName().equals(initiator.getClusterName())) || (exportGroup.forHost() && existingInitiator.getHostName().equals(initiator.getHostName())))) {
                            // Go through the masks, verify they are all multi-host already
                            for (URI maskId : initiatorToExportMaskPlacementMap.get(port)) {
                                ExportMask mask = _dbClient.queryObject(ExportMask.class, maskId);
                                if (exportGroup.forHost() || maskAppliesToMultipleHosts(mask)) {
                                    // Create a new map entry for this initiator.
                                    exportMasksToAdd.add(mask.getId());
                                }
                            }
                        } else {
                            _log.info("Initiator {} does not have any masks that match its compute resource", initiator.getInitiatorPort());
                        }
                    }
                }
                if (!exportMasksToAdd.isEmpty()) {
                    _log.info("Initiator {} - to be added to export masks: {}", initiator.getInitiatorPort(), exportMasksToAdd);
                    initiatorToExportMaskPlacementMap.put(Initiator.normalizePort(initiator.getInitiatorPort()), exportMasksToAdd);
                }
            }
        }
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) StringSet(com.emc.storageos.db.client.model.StringSet) Initiator(com.emc.storageos.db.client.model.Initiator) ExportMask(com.emc.storageos.db.client.model.ExportMask) List(java.util.List) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) Map(java.util.Map) HashMap(java.util.HashMap) StringMap(com.emc.storageos.db.client.model.StringMap) URI(java.net.URI) HashSet(java.util.HashSet)
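
The placement map filled in by updatePlacementMapForCluster is keyed by the normalized initiator port and maps to the set of candidate ExportMask URIs. A minimal sketch of that bookkeeping with plain Java collections follows; the normalizePort helper here is a simplified stand-in for Initiator.normalizePort, and the URIs are made up for illustration.

import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of the placement-map bookkeeping used above: normalized port -> candidate export masks.
public class PlacementMapSketch {

    // Simplified stand-in for Initiator.normalizePort: drop separators, upper-case the WWN.
    static String normalizePort(String port) {
        return port.replace(":", "").toUpperCase();
    }

    public static void main(String[] args) {
        Map<String, Set<URI>> placementMap = new HashMap<>();
        String port = "10:00:00:00:c9:2f:aa:01";
        URI maskA = URI.create("urn:storageos:ExportMask:mask-a");
        URI maskB = URI.create("urn:storageos:ExportMask:mask-b");

        // Accumulate candidate masks under the normalized port, mirroring the map updates above.
        placementMap.computeIfAbsent(normalizePort(port), k -> new HashSet<>()).add(maskA);
        placementMap.computeIfAbsent(normalizePort(port), k -> new HashSet<>()).add(maskB);

        // One key (10000000C92FAA01) now points at both candidate masks.
        System.out.println(placementMap);
    }
}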

Example 83 with Initiator

use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.

the class VmaxMaskingOrchestrator method mapInitiatorsToComputeResource.

/**
 * This function processes the initiatorURIs and returns a mapping of String
 * host or cluster resource reference to a list of Initiator URIs.
 *
 * This is the default implementation; it groups the
 * initiators by their host reference.
 *
 * @param exportGroup [in] - ExportGroup object to examine
 * @param initiatorURIs [in] - Initiator URIs
 * @return Map of String:computeResourceName to List of Initiator URIs
 */
@Override
protected Map<String, List<URI>> mapInitiatorsToComputeResource(ExportGroup exportGroup, Collection<URI> initiatorURIs) {
    Map<String, List<URI>> result = new HashMap<String, List<URI>>();
    if (exportGroup.forCluster()) {
        Cluster singleCluster = null;
        if (exportGroup.getClusters() != null && exportGroup.getClusters().size() == 1) {
            String clusterUriString = exportGroup.getClusters().iterator().next();
            singleCluster = _dbClient.queryObject(Cluster.class, URI.create(clusterUriString));
        }
        for (URI newExportMaskInitiator : initiatorURIs) {
            Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
            if (initiator != null) {
                String clusterName = getClusterName(singleCluster, initiator);
                List<URI> initiatorSet = result.get(clusterName);
                if (initiatorSet == null) {
                    initiatorSet = new ArrayList<URI>();
                    result.put(clusterName, initiatorSet);
                }
                initiatorSet.add(newExportMaskInitiator);
                _log.info(String.format("cluster = %s, initiators to add to map: %s, ", clusterName, newExportMaskInitiator.toString()));
            }
        }
    } else {
        // Filler URI for those initiators without a host object; it helps maintain a well-formed map.
        // We want to bunch up the non-host initiators together under one key.
        URI fillerHostURI = NullColumnValueGetter.getNullURI();
        for (URI newExportMaskInitiator : initiatorURIs) {
            Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
            // Not all initiators have hosts, be sure to handle either case.
            URI hostURI = initiator.getHost();
            if (hostURI == null) {
                hostURI = fillerHostURI;
            }
            List<URI> initiatorSet = result.get(hostURI.toString());
            if (initiatorSet == null) {
                initiatorSet = new ArrayList<URI>();
                result.put(hostURI.toString(), initiatorSet);
            }
            initiatorSet.add(initiator.getId());
            _log.info(String.format("host = %s, initiators to add to map: %d, ", hostURI, result.get(hostURI.toString()).size()));
        }
    }
    return result;
}
Also used : HashMap(java.util.HashMap) Initiator(com.emc.storageos.db.client.model.Initiator) Cluster(com.emc.storageos.db.client.model.Cluster) List(java.util.List) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) URI(java.net.URI)
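
The grouping done by mapInitiatorsToComputeResource (compute resource key to list of initiator URIs) is an ordinary group-by. The sketch below shows the same shape with plain Java; the Init record, the URIs and the filler key are illustrative stand-ins for the CoprHD Initiator model and NullColumnValueGetter.getNullURI().

import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Sketch of the compute-resource grouping: host (or filler) key -> list of initiator URIs.
public class GroupInitiatorsSketch {

    // Illustrative stand-in for the Initiator model: just an id and an optional host.
    record Init(URI id, URI host) {}

    public static void main(String[] args) {
        URI host1 = URI.create("urn:storageos:Host:h1");
        List<Init> initiators = List.of(
                new Init(URI.create("urn:storageos:Initiator:i1"), host1),
                new Init(URI.create("urn:storageos:Initiator:i2"), host1),
                new Init(URI.create("urn:storageos:Initiator:i3"), null)); // no host object

        // Initiators without a host are bunched under a single filler key,
        // mirroring the NullColumnValueGetter.getNullURI() trick above.
        String fillerKey = "urn:storageos:no-host";
        Map<String, List<URI>> byResource = initiators.stream()
                .collect(Collectors.groupingBy(
                        i -> i.host() != null ? i.host().toString() : fillerKey,
                        Collectors.mapping(Init::id, Collectors.toList())));

        byResource.forEach((key, ids) -> System.out.println(key + " -> " + ids));
    }
}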

Example 84 with Initiator

use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.

the class VmaxMaskingOrchestrator method determineExportGroupCreateSteps.

/**
 * Routine contains logic to create an export mask on the array
 *
 * @param workflow - Workflow object to create steps against
 * @param previousStep - [optional] Identifier of workflow step to wait for
 * @param device - BlockStorageDevice implementation
 * @param storage - StorageSystem object representing the underlying array
 * @param exportGroup - ExportGroup object representing Bourne-level masking
 * @param initiatorURIs - List of Initiator URIs
 * @param volumeMap - Map of Volume URIs to requested Integer HLUs
 * @param zoningStepNeeded - Determines whether zone step is needed
 * @param token - Identifier for the operation
 * @throws Exception
 */
@Override
public boolean determineExportGroupCreateSteps(Workflow workflow, String previousStep, BlockStorageDevice device, StorageSystem storage, ExportGroup exportGroup, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, boolean zoningStepNeeded, String token) throws Exception {
    // If we didn't create any workflows by the end of this method, we can return an appropriate exception (instead of the Task just
    // hanging)
    boolean flowCreated = false;
    InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
    // Find the qualifying export masks that are associated with any or all of the ports in
    // portNames. We will have to do processing differently based on whether
    // or not there are existing ExportMasks.
    // 
    // In the case of clusters, we try to find the export mask that contains a subset of initiators
    // of the cluster, so we can build onto it.
    Set<URI> partialMasks = new HashSet<>();
    /**
     * For cluster exports, we will not reuse any partial masking views. A masking view is only reused if all the required cluster
     * initiators are available in the existing masking view. This is to simplify the existing design.
     * - If only partial masking views are available, we will not reuse them.
     * - If there are masking views with all the required cluster initiators, we will reuse them.
     * - If there are masking views which have more than one host of the cluster, but not all of the hosts, don't reuse them.
     * - If there are existing masking views with all the hosts in the cluster, but some of the hosts don't contain all ViPR-discovered
     * initiators, don't reuse them. In that case, always create a new masking view for the cluster.
     *
     * Note that we consider only the host or cluster initiators connected to the network to be part of the given masking view.
     * If ViPR discovered X initiators in the cluster and only X-N are connected to the network,
     * then we look for a masking view with X-N initiators, not X. Later, during export, the remaining initiators will be added to the IG.
     * The existing IG can be one single IG with more than one host, or it could be an IG per host with missing initiators.
     * If X initiators are already available in the view, then we try to create a new masking view by reusing the IG.
     * During reuse, if the masking view creation fails because an initiator port is already in use, the user has to modify the existing
     * initiator group.
     */
    Map<String, Set<URI>> matchingMasks = device.findExportMasks(storage, initiatorHelper.getPortNames(), exportGroup.forCluster());
    Map<String, List<URI>> initiatorToComputeResourceMap = initiatorHelper.getResourceToInitiators();
    Map<String, Set<URI>> initiatorToExportMaskPlacementMap = determineInitiatorToExportMaskPlacements(exportGroup, storage.getId(), initiatorToComputeResourceMap, matchingMasks, initiatorHelper.getPortNameToInitiatorURI(), partialMasks);
    /**
     * COP-28674: During VBlock boot volume export, if existing masking views are found, check them for existing volumes.
     * If any are found, throw an exception. This condition applies only to VBlock boot volume export.
     */
    if (exportGroup.forHost() && ExportMaskUtils.isVblockHost(initiatorURIs, _dbClient) && ExportMaskUtils.isBootVolume(_dbClient, volumeMap)) {
        _log.info("VBlock boot volume Export: Validating the storage system {} to find existing masking views", storage.getNativeGuid());
        if (CollectionUtils.isEmpty(matchingMasks)) {
            _log.info("No existing masking views found, passed validation..");
        } else {
            List<String> maskNames = new ArrayList<String>();
            for (Entry<String, Set<URI>> maskEntry : matchingMasks.entrySet()) {
                List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, maskEntry.getValue());
                if (!CollectionUtils.isEmpty(masks)) {
                    for (ExportMask mask : masks) {
                        maskNames.add(mask.getMaskName());
                    }
                }
            }
            Set<String> computeResourceSet = initiatorToComputeResourceMap.keySet();
            ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
            ServiceError serviceError = DeviceControllerException.errors.existingMaskFoundDuringBootVolumeExport(Joiner.on(",").join(maskNames), computeResourceSet.iterator().next());
            completer.error(_dbClient, serviceError);
            return false;
        }
    } else {
        _log.info("VBlock Boot volume Export Validation : Skipping");
    }
    /**
     * To support multiple exports for VMAX3 volumes with a Host IO Limit, the same Storage Group and
     * Port Group should be used to create a new masking view. Reusing the same storage group would
     * lead to problems, as it could contain additional volumes. Also, reusing a child storage group
     * of one masking view in another masking view is not supported.
     */
    if (storage.checkIfVmax3() && ExportUtils.checkIfvPoolHasHostIOLimitSet(_dbClient, volumeMap)) {
        _log.info("Volumes have Host IO Limit set in virtual pools. Validating for multiple export..");
        Map<String, List<URI>> storageGroupToVolumes = getDevice().groupVolumesByStorageGroupWithHostIOLimit(storage, volumeMap.keySet());
        if (!storageGroupToVolumes.isEmpty()) {
            ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
            ServiceError serviceError = DeviceControllerException.errors.cannotMultiExportVolumesWithHostIOLimit(Joiner.on(",").join(storageGroupToVolumes.keySet()), Joiner.on(",").join(storageGroupToVolumes.values()));
            completer.error(_dbClient, serviceError);
            return false;
        }
    }
    findAndUpdateFreeHLUsForClusterExport(storage, exportGroup, initiatorURIs, volumeMap);
    // as well. Certainly not as clearly as this will, but regardless.
    if (initiatorToExportMaskPlacementMap.isEmpty()) {
        _log.info(String.format("No existing mask found w/ initiators { %s }", Joiner.on(",").join(initiatorHelper.getPortNames())));
        if (!initiatorURIs.isEmpty()) {
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
            for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                _log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
                previousStep = result.getStepId();
                flowCreated = true;
            }
        }
    } else {
        Map<URI, ExportMaskPolicy> policyCache = new HashMap<>();
        _log.info(String.format("Mask(s) found w/ initiators {%s}. " + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}", Joiner.on(",").join(initiatorHelper.getPortNames()), Joiner.on(",").join(initiatorToExportMaskPlacementMap.values()), Joiner.on(",").join(initiatorHelper.getPortNameToInitiatorURI().entrySet())));
        // There are some initiators that already exist. We need to create a
        // workflow that create new masking containers or updates masking
        // containers as necessary.
        // These data structures will be used to track new initiators - ones
        // that don't already exist on the array
        List<URI> initiatorURIsCopy = new ArrayList<URI>();
        initiatorURIsCopy.addAll(initiatorURIs);
        // This loop will determine a list of volumes to update per export mask
        Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
        Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
        Set<URI> initiatorsForNewExport = new HashSet<>();
        // have them yet. Find this condition and add the additional ports to the map.
        if (exportGroup.forCluster() || exportGroup.forHost()) {
            updatePlacementMapForCluster(exportGroup, initiatorHelper.getResourceToInitiators(), initiatorToExportMaskPlacementMap);
        }
        // to mark that these initiators need to be added to the existing masks.
        for (Map.Entry<String, Set<URI>> entry : initiatorToExportMaskPlacementMap.entrySet()) {
            URI initiatorURI = initiatorHelper.getPortNameToInitiatorURI().get(entry.getKey());
            Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
            // Keep track of those initiators that have been found to exist already
            // in some export mask on the array
            initiatorURIsCopy.remove(initiatorURI);
            List<URI> exportMaskURIs = new ArrayList<URI>();
            exportMaskURIs.addAll(entry.getValue());
            List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
            _log.info(String.format("initiator %s masks {%s}", initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
            // is not yet added to the mask. Note the masks were all refreshed by #device.findExportMasks() above
            for (ExportMask mask : masks) {
                _log.info(String.format("processing mask %s and initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
                // Check for NO_VIPR. If found, avoid this mask.
                if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
                    _log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
                    continue;
                }
                ExportMaskPolicy exportMaskDetails = getExportMaskPolicy(policyCache, device, storage, mask);
                // Check if the ExportMask applies to more than one host. Since
                // ViPR will be creating ExportMask per compute resource
                // (single host or cluster), the only way that an existing mask
                // applies to multiple hosts is when it was for a cluster
                // export. If we find that to be the case,
                // we should be able to create ExportMasks for it.
                boolean hasMultipleHosts = maskAppliesToMultipleHosts(mask);
                boolean createHostExportWhenClusterExportExists = (hasMultipleHosts && exportGroup.forHost());
                // One-node cluster case: always create a new MV if the existing mask doesn't contain a cascaded IG.
                boolean createClusterExportWhenHostExportExists = (exportGroup.forCluster() && !exportMaskDetails.isCascadedIG());
                if (createClusterExportWhenHostExportExists || createHostExportWhenClusterExportExists) {
                    // It may turn out that we find these initiators already covered by a collection of
                    // masks for cluster purposes. If that's the case, we figure that out below and these
                    // "new" exports will never see the light of day.
                    _log.info("New export mask will be created for initiator {}", initiatorURI);
                    initiatorsForNewExport.add(initiatorURI);
                    // remove this mask from policyCache
                    policyCache.remove(mask.getId());
                    continue;
                }
                // in our export group, because we would simply add to them.
                if (mask.getInitiators() != null) {
                    for (String existingMaskInitiatorStr : mask.getInitiators()) {
                        Initiator existingMaskInitiator = _dbClient.queryObject(Initiator.class, URI.create(existingMaskInitiatorStr));
                        // we should add it to this mask.
                        if ((initiator != null && initiator.getId() != null) && // and we don't have an entry already to add this initiator to the mask
                        (!existingMasksToUpdateWithNewInitiators.containsKey(mask.getId()) || !existingMasksToUpdateWithNewInitiators.get(mask.getId()).contains(initiator)) && // and the initiator exists in the first place
                        (existingMaskInitiator != null && // and this is a host export for this host, or...
                        (exportGroup.forHost() && initiator.getHost() != null && initiator.getHost().equals(existingMaskInitiator.getHost()) || // this is a cluster export for this cluster
                        (exportGroup.forCluster() && initiator.getClusterName() != null && initiator.getClusterName().equals(existingMaskInitiator.getClusterName()))))) {
                            // Add to the list of initiators we need to add to this mask
                            Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
                            if (existingMaskInitiators == null) {
                                existingMaskInitiators = new HashSet<Initiator>();
                                existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
                            }
                            // to the export group later.
                            if (!mask.hasInitiator(initiator.getId().toString())) {
                                existingMaskInitiators.add(initiator);
                                _log.info(String.format("initiator %s needs to be added to mask %s", initiator.getInitiatorPort(), mask.getMaskName()));
                            }
                        }
                    }
                }
            }
        }
        VmaxVolumeToExportMaskApplicatorContext context = createVmaxNativeApplicatorContext(workflow, exportGroup, storage, policyCache, zoningStepNeeded, token, initiatorHelper, initiatorToExportMaskPlacementMap, initiatorURIsCopy, partialMasks, volumeMap, initiatorsForNewExport, existingMasksToUpdateWithNewVolumes, existingMasksToUpdateWithNewInitiators, previousStep);
        NativeVolumeToExportMaskRuleApplicator ruleApplicator = new NativeVolumeToExportMaskRuleApplicator(_dbClient, context);
        ruleApplicator.run();
        if (context.resultSuccess) {
            // Set the flags that should have been filled in by NativeVolumeToExportMaskRuleApplicator running
            previousStep = context.previousStep;
            flowCreated = context.flowCreated;
        } else {
            _log.info("Failure in volume to ExportMask rules");
            return false;
        }
        _log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d", existingMasksToUpdateWithNewVolumes.size()));
        // or vice-versa.
        if (!initiatorsForNewExport.isEmpty()) {
            _log.info("Initiators for which new Export Mask will be created: {}", initiatorsForNewExport);
            if (exportGroup.forCluster() && !initiatorURIsCopy.isEmpty()) {
                // Clustered export group create request and there are essentially
                // new and existing initiators. We'll take what's not already
                // exported to and add it to the list of initiators to export
                initiatorsForNewExport.addAll(initiatorURIsCopy);
                // Clear the copy list because we're going to be creating exports
                // for these. (There's code below that uses initiatorURIsCopy to
                // determine what exports to update)
                initiatorURIsCopy.clear();
            }
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorsForNewExport);
            for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                _log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
                flowCreated = true;
                previousStep = result.getStepId();
                if (zoningStepNeeded) {
                    String zoningStep = workflow.createStepId();
                    List<URI> masks = new ArrayList<URI>();
                    masks.add(result.getMaskURI());
                    previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, masks, volumeMap, zoningStep);
                }
            }
        }
        // The initiatorURIsCopy was used in the for each initiator loop to see
        // which initiators already exist in a mask. If it is non-empty,
        // then it means there are initiators that are new,
        // so let's add them to the main tracker
        Map<String, List<URI>> newComputeResources = mapInitiatorsToComputeResource(exportGroup, initiatorURIsCopy);
        // and/or add volumes to existing masks.
        if (newComputeResources != null && !newComputeResources.isEmpty()) {
            for (Map.Entry<String, List<URI>> entry : newComputeResources.entrySet()) {
                // We have some brand new initiators, let's add them to new masks
                _log.info(String.format("New mask needed for compute resource %s", entry.getKey()));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, entry.getValue(), volumeMap, token);
                flowCreated = true;
                previousStep = result.getStepId();
                // Add zoning step
                if (zoningStepNeeded) {
                    String zoningStep = workflow.createStepId();
                    List<URI> masks = new ArrayList<URI>();
                    masks.add(result.getMaskURI());
                    previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, masks, volumeMap, zoningStep);
                }
            }
        }
        // Put volumes in the existing masks that need them.
        for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            updateZoningMap(exportGroup, mask, true);
            Map<URI, Integer> volumesToAdd = entry.getValue();
            _log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
            previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, Arrays.asList(mask), new ArrayList<URI>(volumesToAdd.keySet()));
            previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, mask, volumesToAdd, null);
            flowCreated = true;
            exportGroup.addExportMask(mask.getId());
            _dbClient.updateObject(exportGroup);
        }
        // Put new initiators in existing masks that are missing them.
        for (Map.Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            // modifying it or making it part of our export group.
            if (!existingMasksToUpdateWithNewVolumes.containsKey(mask.getId())) {
                _log.info(String.format("Not adding initiators to mask: %s because we found we don't need to change the mask", mask.getMaskName()));
                continue;
            }
            updateZoningMap(exportGroup, mask, true);
            exportGroup.addExportMask(mask.getId());
            _dbClient.updateObject(exportGroup);
            Set<Initiator> initiatorsToAdd = entry.getValue();
            if (!initiatorsToAdd.isEmpty()) {
                List<URI> initiatorsURIs = new ArrayList<URI>();
                for (Initiator initiator : initiatorsToAdd) {
                    initiatorsURIs.add(initiator.getId());
                }
                _log.info(String.format("adding these initiators %s to mask %s", Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
                Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
                maskToInitiatorsMap.put(mask.getId(), initiatorsURIs);
                previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup, maskToInitiatorsMap);
                previousStep = generateExportMaskAddInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, volumeMap.keySet(), token);
                flowCreated = true;
            }
        }
    }
    // Catch if no flows were created; close off the task
    if (!flowCreated) {
        ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
        completer.ready(_dbClient);
        return true;
    }
    return true;
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) StringSet(com.emc.storageos.db.client.model.StringSet) HashMap(java.util.HashMap) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URI(java.net.URI) Initiator(com.emc.storageos.db.client.model.Initiator) List(java.util.List) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) HashSet(java.util.HashSet) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) Map(java.util.Map) HashMap(java.util.HashMap) StringMap(com.emc.storageos.db.client.model.StringMap) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask)
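
Several loops in determineExportGroupCreateSteps accumulate pending changes keyed by ExportMask URI (existingMasksToUpdateWithNewVolumes and existingMasksToUpdateWithNewInitiators) before any workflow steps are generated. Below is a minimal sketch of that accumulation pattern with plain collections; the URIs and HLU value are made up for illustration.

import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of the per-mask accumulation used above before workflow steps are generated.
public class PendingMaskChangesSketch {
    public static void main(String[] args) {
        Map<URI, Map<URI, Integer>> masksToNewVolumes = new HashMap<>();
        Map<URI, Set<URI>> masksToNewInitiators = new HashMap<>();

        URI mask = URI.create("urn:storageos:ExportMask:mask-1");
        URI volume = URI.create("urn:storageos:Volume:vol-1");
        URI initiator = URI.create("urn:storageos:Initiator:init-1");

        // Record a volume (with its requested HLU) and an initiator against the mask.
        masksToNewVolumes.computeIfAbsent(mask, m -> new HashMap<>()).put(volume, 1);
        masksToNewInitiators.computeIfAbsent(mask, m -> new HashSet<>()).add(initiator);

        // Later, one workflow step per mask consumes these maps, as the
        // generateExportMaskAddVolumesWorkflow / generateZoningAddInitiatorsWorkflow calls do above.
        masksToNewVolumes.forEach((m, vols) ->
                System.out.println("add volumes " + vols.keySet() + " to " + m));
        masksToNewInitiators.forEach((m, inits) ->
                System.out.println("add initiators " + inits + " to " + m));
    }
}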

Example 85 with Initiator

use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.

the class VmaxMaskingOrchestrator method exportGroupAddInitiators.

@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    BlockStorageDevice device = getDevice();
    String previousStep = null;
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
    ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
    logExportGroup(exportGroup, storageURI);
    // Set up workflow steps.
    Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddInitiators", true, token);
    Map<URI, List<URI>> zoneMasksToInitiatorsURIs = new HashMap<URI, List<URI>>();
    Map<URI, Map<URI, Integer>> zoneNewMasksToVolumeMap = new HashMap<URI, Map<URI, Integer>>();
    Map<URI, ExportMask> refreshedMasks = new HashMap<URI, ExportMask>();
    // Populate a map of volumes on the storage device
    List<BlockObject> blockObjects = new ArrayList<BlockObject>();
    Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
    if (exportGroup != null && exportGroup.getVolumes() != null) {
        for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
            URI boURI = URI.create(entry.getKey());
            Integer hlu = Integer.valueOf(entry.getValue());
            BlockObject bo = BlockObject.fetch(_dbClient, boURI);
            if (bo.getStorageController().equals(storageURI)) {
                volumeMap.put(boURI, hlu);
                blockObjects.add(bo);
            }
        }
    }
    InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
    boolean anyOperationsToDo = false;
    Set<URI> partialMasks = new HashSet<>();
    Map<String, Set<URI>> initiatorToExportMaskPlacementMap = determineInitiatorToExportMaskPlacements(exportGroup, storageURI, initiatorHelper.getResourceToInitiators(), device.findExportMasks(storage, initiatorHelper.getPortNames(), false), initiatorHelper.getPortNameToInitiatorURI(), partialMasks);
    if (!initiatorToExportMaskPlacementMap.isEmpty()) {
        Map<URI, ExportMaskPolicy> policyCache = new HashMap<>();
        // The logic contained here is trying to place the initiators that were passed down in the
        // request. If we are in this path where the initiatorToExportMaskPlacementMap is not empty, then there
        // are several cases why we got here:
        // 
        // 1). An ExportMask has been found that is associated with the ExportGroup and it
        // is supposed to be the container for the compute resources that we are attempting
        // to add initiators for.
        // 2). An ExportMask has been found that is on the array. It may not be associated with the
        // ExportGroup, but it is supposed to be the container for the compute resources that
        // we are attempting to add initiators for.
        // 3). An ExportMask has been found that is on the array. It may not be associated with the
        // ExportGroup, but it has the initiators that we are trying to add
        // 4). One of the above possibilities + an initiator that cannot be placed. The use case here
        // would be someone adding a new initiator for an existing host plus a new host to a cluster export.
        List<URI> initiatorsToPlace = new ArrayList<URI>();
        initiatorsToPlace.addAll(initiatorURIs);
        // This loop will determine a list of volumes to update per export mask
        Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes = new HashMap<URI, Map<URI, Integer>>();
        Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators = new HashMap<URI, Set<Initiator>>();
        for (Map.Entry<String, Set<URI>> entry : initiatorToExportMaskPlacementMap.entrySet()) {
            URI initiatorURI = initiatorHelper.getPortNameToInitiatorURI().get(entry.getKey());
            if (initiatorURI == null || exportGroup == null) {
                // This initiator does not exist or it is not one of the initiators passed to the function
                continue;
            }
            Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
            // Get a list of the ExportMasks that were matched to the initiator
            List<URI> exportMaskURIs = new ArrayList<URI>();
            exportMaskURIs.addAll(entry.getValue());
            List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
            _log.info(String.format("Trying to place initiator %s", entry.getKey()));
            for (ExportMask mask : masks) {
                // Check for NO_VIPR. If found, avoid this mask.
                if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
                    _log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
                    continue;
                }
                _log.info(String.format("Trying to place initiator %s in mask %s", entry.getKey(), mask.getMaskName()));
                if (mask.getInactive() && !mask.getStorageDevice().equals(storageURI)) {
                    continue;
                }
                // determineInitiatorToExportMaskPlacements or findExportMasks
                if (!refreshedMasks.containsKey(mask.getId())) {
                    mask = device.refreshExportMask(storage, mask);
                    refreshedMasks.put(mask.getId(), mask);
                }
                ExportMaskPolicy policy = getExportMaskPolicy(policyCache, device, storage, mask);
                // yet. The below logic will add the volumes necessary.
                if (mask.hasInitiator(initiatorURI.toString()) && CollectionUtils.isEmpty(ExportUtils.getExportMasksSharingInitiator(_dbClient, initiatorURI, mask, exportMaskURIs))) {
                    _log.info(String.format("mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
                    // already in the masks to the placement list
                    for (BlockObject blockObject : blockObjects) {
                        // blockObject properties, and so on.
                        if (!mask.hasExistingVolume(blockObject.getWWN()) && !mask.hasVolume(blockObject.getId())) {
                            String volumePolicyName = ControllerUtils.getAutoTieringPolicyName(blockObject.getId(), _dbClient);
                            if (((volumePolicyName == null || volumePolicyName.equalsIgnoreCase(Constants.NONE.toString())) && (policy.tierPolicies == null || policy.tierPolicies.isEmpty())) || ((volumePolicyName != null && policy.tierPolicies != null && policy.tierPolicies.size() == 1 && policy.tierPolicies.contains(volumePolicyName)))) {
                                _log.info(String.format("mask doesn't have volume %s yet, need to add it", blockObject.getId()));
                                Map<URI, Integer> newVolumesMap = existingMasksToUpdateWithNewVolumes.get(mask.getId());
                                if (newVolumesMap == null) {
                                    newVolumesMap = new HashMap<URI, Integer>();
                                    existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumesMap);
                                }
                                newVolumesMap.put(blockObject.getId(), volumeMap.get(blockObject.getId()));
                            }
                        } else {
                            _log.info(String.format("not adding volume %s to mask %s", blockObject.getId(), mask.getMaskName()));
                        }
                    }
                    // The initiator has been placed - it is in an already existing export
                    // for which case, we may just have to add volumes to it
                    initiatorsToPlace.remove(initiatorURI);
                } else {
                    Set<URI> existingInitiatorIds = ExportMaskUtils.getAllInitiatorsForExportMask(_dbClient, mask);
                    if (existingInitiatorIds.isEmpty()) {
                        _log.info(String.format("not adding initiator to %s mask %s because there are no initiators associated with this mask", initiatorURI, mask.getMaskName()));
                    }
                    // This mask does not contain the initiator, but it may not belong to the same compute resource.
                    for (URI existingInitiatorId : existingInitiatorIds) {
                        Initiator existingInitiator = _dbClient.queryObject(Initiator.class, existingInitiatorId);
                        if (existingInitiator == null) {
                            _log.warn(String.format("Initiator %s was found to be associated with ExportMask %s, but no longer exists in the DB", existingInitiatorId, mask.getId()));
                            continue;
                        }
                        if ((existingInitiator.getHost() != null && existingInitiator.getHost().equals(initiator.getHost())) || (existingInitiator.getClusterName() != null && existingInitiator.getClusterName().equals(initiator.getClusterName()))) {
                            // Place the initiator in this ExportMask.
                            if (exportGroup.forCluster() && !policy.isCascadedIG() && ((existingInitiator.getHost() == null || !existingInitiator.getHost().equals(initiator.getHost())))) {
                                _log.info(String.format("not adding initiator to %s mask %s because it is likely part of another mask in the cluster", initiatorURI, mask.getMaskName()));
                                continue;
                            }
                            Set<Initiator> existingMaskInitiators = existingMasksToUpdateWithNewInitiators.get(mask.getId());
                            if (existingMaskInitiators == null) {
                                existingMaskInitiators = new HashSet<Initiator>();
                                existingMasksToUpdateWithNewInitiators.put(mask.getId(), existingMaskInitiators);
                            }
                            _log.info(String.format("adding initiator to %s mask %s because it was found to be in the same compute resource", initiatorURI, mask.getMaskName()));
                            existingMaskInitiators.add(initiator);
                            // The initiator has been placed - it is not in the export, we will have to
                            // add it to the mask
                            initiatorsToPlace.remove(initiatorURI);
                        } else {
                            _log.info(String.format("not adding initiator to %s mask %s because it doesn't belong to the same compute resource", existingInitiator.getId(), mask.getMaskName()));
                        }
                    }
                }
                updateZoningMap(exportGroup, mask, true);
            }
        }
        // so let's add them to the main tracker
        if (!initiatorsToPlace.isEmpty()) {
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorsToPlace);
            for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                _log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
                previousStep = result.getStepId();
                zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
                anyOperationsToDo = true;
            }
        }
        _log.info(String.format("existingMasksToUpdateWithNewVolumes.size = %d", existingMasksToUpdateWithNewVolumes.size()));
        // At this point we have a mapping of all the masks that we need to update with new volumes
        for (Map.Entry<URI, Map<URI, Integer>> entry : existingMasksToUpdateWithNewVolumes.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            Map<URI, Integer> volumesToAdd = entry.getValue();
            _log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
            List<URI> volumeURIs = new ArrayList<URI>();
            volumeURIs.addAll(volumesToAdd.keySet());
            List<ExportMask> masks = new ArrayList<ExportMask>();
            masks.add(mask);
            previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, masks, volumeURIs);
            previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, mask, volumesToAdd, null);
            anyOperationsToDo = true;
        }
        // At this point we have a mapping of all the masks that we need to update with new initiators
        for (Map.Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            Set<Initiator> initiatorsToAdd = entry.getValue();
            List<URI> initiatorsURIs = new ArrayList<URI>();
            for (Initiator initiator : initiatorsToAdd) {
                initiatorsURIs.add(initiator.getId());
            }
            _log.info(String.format("adding these initiators %s to mask %s", Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
            Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
            maskToInitiatorsMap.put(mask.getId(), initiatorsURIs);
            previousStep = generateExportMaskAddInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, null, token);
            previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup, maskToInitiatorsMap);
            anyOperationsToDo = true;
        }
    } else {
        _log.info("There are no masks for this export. Need to create anew.");
        // zones required (which might be on multiple NetworkSystems.)
        for (Map.Entry<String, List<URI>> resourceEntry : initiatorHelper.getResourceToInitiators().entrySet()) {
            String computeKey = resourceEntry.getKey();
            List<URI> computeInitiatorURIs = resourceEntry.getValue();
            _log.info(String.format("New export masks for %s", computeKey));
            GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, previousStep, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
            zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumeMap);
            previousStep = result.getStepId();
            anyOperationsToDo = true;
        }
    }
    if (anyOperationsToDo) {
        if (!zoneNewMasksToVolumeMap.isEmpty()) {
            List<URI> exportMaskList = new ArrayList<URI>();
            exportMaskList.addAll(zoneNewMasksToVolumeMap.keySet());
            Map<URI, Integer> overallVolumeMap = new HashMap<URI, Integer>();
            for (Map<URI, Integer> oneVolumeMap : zoneNewMasksToVolumeMap.values()) {
                overallVolumeMap.putAll(oneVolumeMap);
            }
            previousStep = generateZoningCreateWorkflow(workflow, previousStep, exportGroup, exportMaskList, overallVolumeMap);
        }
        if (!zoneMasksToInitiatorsURIs.isEmpty()) {
            previousStep = generateZoningAddInitiatorsWorkflow(workflow, previousStep, exportGroup, zoneMasksToInitiatorsURIs);
        }
        String successMessage = String.format("Successfully exported to initiators on StorageArray %s", storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
    } else {
        taskCompleter.ready(_dbClient);
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) StringSet(com.emc.storageos.db.client.model.StringSet) HashMap(java.util.HashMap) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URI(java.net.URI) BlockStorageDevice(com.emc.storageos.volumecontroller.BlockStorageDevice) Initiator(com.emc.storageos.db.client.model.Initiator) List(java.util.List) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) HashSet(java.util.HashSet) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Map(java.util.Map) HashMap(java.util.HashMap) StringMap(com.emc.storageos.db.client.model.StringMap) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask)
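
exportGroupAddInitiators first builds a volume map for the target array by filtering the export group's volume entries (block object URI to HLU string) down to the storage system being exported to. The sketch below shows that filtering step with plain types; the Block record and the in-memory db map are stand-ins for BlockObject.fetch and the real database client.

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

// Sketch of the volume-map population at the top of exportGroupAddInitiators:
// keep only the export group volumes that live on the storage system being exported to.
public class VolumeMapSketch {

    // Illustrative stand-in for a block object: id plus owning storage system.
    record Block(URI id, URI storageController) {}

    public static void main(String[] args) {
        URI targetArray = URI.create("urn:storageos:StorageSystem:vmax-1");
        URI otherArray = URI.create("urn:storageos:StorageSystem:vmax-2");
        URI v1 = URI.create("urn:storageos:Volume:v1");
        URI v2 = URI.create("urn:storageos:Volume:v2");

        // ExportGroup.getVolumes() is a StringMap of volume URI -> HLU; modeled as plain strings here.
        Map<String, String> exportGroupVolumes = Map.of(v1.toString(), "1", v2.toString(), "2");
        Map<URI, Block> db = Map.of(v1, new Block(v1, targetArray), v2, new Block(v2, otherArray));

        Map<URI, Integer> volumeMap = new HashMap<>();
        for (Map.Entry<String, String> entry : exportGroupVolumes.entrySet()) {
            URI boURI = URI.create(entry.getKey());
            Block bo = db.get(boURI); // stands in for BlockObject.fetch(_dbClient, boURI)
            if (bo != null && bo.storageController().equals(targetArray)) {
                volumeMap.put(boURI, Integer.valueOf(entry.getValue()));
            }
        }
        System.out.println(volumeMap); // only v1 remains, with HLU 1
    }
}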

Aggregations

Initiator (com.emc.storageos.db.client.model.Initiator)487 URI (java.net.URI)345 ArrayList (java.util.ArrayList)266 HashMap (java.util.HashMap)170 HashSet (java.util.HashSet)161 ExportMask (com.emc.storageos.db.client.model.ExportMask)156 List (java.util.List)119 URIQueryResultList (com.emc.storageos.db.client.constraint.URIQueryResultList)102 DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException)98 Map (java.util.Map)85 StringSet (com.emc.storageos.db.client.model.StringSet)83 ExportGroup (com.emc.storageos.db.client.model.ExportGroup)82 StorageSystem (com.emc.storageos.db.client.model.StorageSystem)81 StoragePort (com.emc.storageos.db.client.model.StoragePort)78 NamedURI (com.emc.storageos.db.client.model.NamedURI)73 Set (java.util.Set)72 ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError)58 StringSetMap (com.emc.storageos.db.client.model.StringSetMap)57 StringMap (com.emc.storageos.db.client.model.StringMap)55 Host (com.emc.storageos.db.client.model.Host)54