
Example 76 with StorageSystem

use of com.emc.storageos.db.client.model.StorageSystem in project coprhd-controller by CoprHD.

the class AbstractBasicMaskingOrchestrator method exportGroupRemoveInitiators.

@Override
public void exportGroupRemoveInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StringBuffer errorMessage = new StringBuffer();
        logExportGroup(exportGroup, storageURI);
        // Set up workflow steps.
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveInitiators", true, token);
        Initiator firstInitiator = _dbClient.queryObject(Initiator.class, initiatorURIs.get(0));
        // Orchestrator-level validation is not needed for VPLEX/RP, so validation is skipped for VPLEX/RP initiators.
        boolean isValidationNeeded = validatorConfig.isValidationEnabled() && !VPlexControllerUtils.isVplexInitiator(firstInitiator, _dbClient) && !ExportUtils.checkIfInitiatorsForRP(Arrays.asList(firstInitiator));
        _log.info("Orchestration level validation needed : {}", isValidationNeeded);
        Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
        List<String> portNames = new ArrayList<String>();
        // Populate the port WWN/IQNs (portNames) and the
        // mapping of the WWN/IQNs to Initiator URIs
        processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI);
        // Populate a list of the volumes on this storage device that are associated with the ExportGroup
        List<BlockObject> blockObjects = new ArrayList<BlockObject>();
        if (exportGroup.getVolumes() != null) {
            for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
                URI boURI = URI.create(entry.getKey());
                BlockObject bo = BlockObject.fetch(_dbClient, boURI);
                if (bo.getStorageController().equals(storageURI)) {
                    blockObjects.add(bo);
                }
            }
        }
        List<String> initiatorNames = new ArrayList<String>();
        for (URI initiatorURI : initiatorURIs) {
            Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
            String normalizedName = Initiator.normalizePort(initiator.getInitiatorPort());
            initiatorNames.add(normalizedName);
        }
        _log.info("Normalized initiator names :{}", initiatorNames);
        device.findExportMasks(storage, initiatorNames, false);
        Map<URI, Boolean> initiatorIsPartOfFullListFlags = flagInitiatorsThatArePartOfAFullList(exportGroup, initiatorURIs);
        boolean anyOperationsToDo = false;
        if (exportGroup != null && !ExportMaskUtils.getExportMasks(_dbClient, exportGroup).isEmpty()) {
            // There were some exports out there that already have some or all of the
            // initiators that we are attempting to remove. We need to only
            // remove the volumes that the user added to these masks
            Map<String, Set<URI>> matchingExportMaskURIs = getInitiatorToExportMaskMap(exportGroup);
            // This loop will determine a list of volumes to update per export mask
            Map<URI, List<URI>> existingMasksToRemoveInitiator = new HashMap<URI, List<URI>>();
            Map<URI, List<URI>> existingMasksToRemoveVolumes = new HashMap<URI, List<URI>>();
            for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
                URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
                if (initiatorURI == null || !initiatorURIs.contains(initiatorURI)) {
                    // Entry key points to an initiator that was not passed in the remove request
                    continue;
                }
                Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
                // Get a list of the ExportMasks that were matched to the initiator
                // go through the initiators and figure out the proper initiator and volume ramifications
                // to the existing masks.
                List<URI> exportMaskURIs = new ArrayList<URI>();
                exportMaskURIs.addAll(entry.getValue());
                List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
                _log.info(String.format("initiator %s masks {%s}", initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
                for (ExportMask mask : masks) {
                    if (mask == null || mask.getInactive() || !mask.getStorageDevice().equals(storageURI)) {
                        continue;
                    }
                    mask = getDevice().refreshExportMask(storage, mask);
                    _log.info(String.format("mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
                    // We cannot remove initiator if there are existing volumes in the mask.
                    if (!isValidationNeeded || !mask.hasAnyExistingVolumes()) {
                        /**
                         * If the user asked to remove a host from a cluster:
                         * 1. Check if the export mask is shared across other export groups; if not, remove the host.
                         * 2. If shared, check whether all of the host's initiators are being asked to be removed.
                         * 3. If yes, check if at least one of the other shared export groups is EXCLUSIVE.
                         * 4. If yes, then remove the shared volumes
                         *
                         * In all other cases, remove the initiators.
                         */
                        List<ExportGroup> otherExportGroups = ExportUtils.getOtherExportGroups(exportGroup, mask, _dbClient);
                        if (!otherExportGroups.isEmpty() && initiatorIsPartOfFullListFlags.get(initiatorURI) && ExportUtils.exportMaskHasBothExclusiveAndSharedVolumes(exportGroup, otherExportGroups, mask)) {
                            if (!exportGroup.forInitiator()) {
                                List<URI> removeVolumesList = existingMasksToRemoveVolumes.get(mask.getId());
                                if (removeVolumesList == null) {
                                    removeVolumesList = new ArrayList<URI>();
                                    existingMasksToRemoveVolumes.put(mask.getId(), removeVolumesList);
                                }
                                for (String volumeIdStr : exportGroup.getVolumes().keySet()) {
                                    URI egVolumeID = URI.create(volumeIdStr);
                                    if (mask.getUserAddedVolumes().containsValue(volumeIdStr) && !removeVolumesList.contains(egVolumeID)) {
                                        removeVolumesList.add(egVolumeID);
                                    }
                                }
                            } else {
                                // Just a note for the unusual case where an Initiator-type export group is involved here.
                                _log.info("Removing volumes from an Initiator type export group as part of an initiator removal is not supported.");
                            }
                        } else {
                            _log.info(String.format("We can remove initiator %s from mask %s", initiator.getInitiatorPort(), mask.getMaskName()));
                            List<URI> initiators = existingMasksToRemoveInitiator.get(mask.getId());
                            if (initiators == null) {
                                initiators = new ArrayList<URI>();
                                existingMasksToRemoveInitiator.put(mask.getId(), initiators);
                            }
                            if (!initiators.contains(initiator.getId())) {
                                initiators.add(initiator.getId());
                            }
                        }
                    } else {
                        errorMessage.append(String.format("Mask %s has existing volumes %s", mask.forDisplay(), Joiner.on(", ").join(mask.getExistingVolumes().keySet())));
                    }
                }
            }
            // At this point we have a mapping of masks to objects that we want to remove
            Set<URI> masksGettingRemoved = new HashSet<URI>();
            // In this loop we are trying to remove those initiators that exist
            // on a mask that ViPR created.
            Map<URI, String> stepMap = new HashMap<URI, String>();
            for (Map.Entry<URI, List<URI>> entry : existingMasksToRemoveInitiator.entrySet()) {
                ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
                List<URI> initiatorsToRemove = entry.getValue();
                Set<String> allInitiators = ExportUtils.getExportMaskAllInitiatorPorts(mask, _dbClient);
                List<Initiator> initiatorObjectsToRemove = _dbClient.queryObject(Initiator.class, initiatorsToRemove);
                List<String> initiatorPortNamesToRemove = new ArrayList<>(Collections2.transform(initiatorObjectsToRemove, CommonTransformerFunctions.fctnInitiatorToPortName()));
                allInitiators.removeAll(initiatorPortNamesToRemove);
                if (allInitiators.isEmpty()) {
                    masksGettingRemoved.add(mask.getId());
                    // For this case, we are attempting to remove all the
                    // initiators in the mask. This means that we will have to
                    // delete the export mask itself.
                    _log.info(String.format("mask %s has removed all " + "initiators, we are going to delete the mask from the " + "array", mask.getMaskName()));
                    List<URI> maskVolumeURIs = ExportMaskUtils.getUserAddedVolumeURIs(mask);
                    List<URI> maskInitiatorURIs = Lists.newArrayList(Collections2.transform(ExportMaskUtils.getInitiatorsForExportMask(_dbClient, mask, null), CommonTransformerFunctions.fctnDataObjectToID()));
                    stepMap.put(entry.getKey(), generateDeviceSpecificDeleteWorkflow(workflow, null, exportGroup, mask, maskVolumeURIs, maskInitiatorURIs, storage));
                    anyOperationsToDo = true;
                } else {
                    _log.info(String.format("mask %s - going to remove the " + "following initiators %s", mask.getMaskName(), Joiner.on(',').join(initiatorsToRemove)));
                    Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
                    maskToInitiatorsMap.put(mask.getId(), initiatorsToRemove);
                    List<URI> maskVolumeURIs = ExportMaskUtils.getUserAddedVolumeURIs(mask);
                    stepMap.put(entry.getKey(), generateDeviceSpecificRemoveInitiatorsWorkflow(workflow, null, exportGroup, mask, storage, maskToInitiatorsMap, maskVolumeURIs, initiatorsToRemove, true));
                    anyOperationsToDo = true;
                }
            }
            // In this loop we generate the steps that remove the requested volumes from the remaining masks
            // for the storage array and ExportGroup.
            for (Map.Entry<URI, List<URI>> entry : existingMasksToRemoveVolumes.entrySet()) {
                if (masksGettingRemoved.contains(entry.getKey())) {
                    _log.info("Mask {} is getting removed, no need to remove volumes from it", entry.getKey().toString());
                    continue;
                }
                ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
                List<URI> volumesToRemove = entry.getValue();
                List<URI> initiatorsToRemove = existingMasksToRemoveInitiator.get(mask.getId());
                if (initiatorsToRemove != null) {
                    List<URI> initiatorsInExportMask = ExportUtils.getExportMaskAllInitiators(mask, _dbClient);
                    initiatorsInExportMask.removeAll(initiatorsToRemove);
                    if (!initiatorsInExportMask.isEmpty()) {
                        // There are still some initiators in this ExportMask
                        _log.info(String.format("ExportMask %s would have remaining initiators {%s} that require access to {%s}. " + "Not going to remove any of the volumes", mask.getMaskName(), Joiner.on(',').join(initiatorsInExportMask), Joiner.on(',').join(volumesToRemove)));
                        continue;
                    }
                }
                Collection<String> volumesToRemoveURIStrings = Collections2.transform(volumesToRemove, CommonTransformerFunctions.FCTN_URI_TO_STRING);
                List<String> exportMaskVolumeURIStrings = new ArrayList<String>(mask.getVolumes().keySet());
                exportMaskVolumeURIStrings.removeAll(volumesToRemoveURIStrings);
                if (exportMaskVolumeURIStrings.isEmpty() && !mask.hasAnyExistingVolumes()) {
                    _log.info(String.format("All the volumes (%s) from mask %s will be removed, so will have to remove the whole mask", Joiner.on(",").join(volumesToRemove), mask.getMaskName()));
                    errorMessage.append(String.format("Mask %s will be removed from array. ", mask.forDisplay()));
                    List<URI> maskVolumeURIs = ExportMaskUtils.getUserAddedVolumeURIs(mask);
                    List<URI> maskInitiatorURIs = Lists.newArrayList(Collections2.transform(ExportMaskUtils.getInitiatorsForExportMask(_dbClient, mask, null), CommonTransformerFunctions.fctnDataObjectToID()));
                    generateDeviceSpecificDeleteWorkflow(workflow, null, exportGroup, mask, maskVolumeURIs, maskInitiatorURIs, storage);
                    anyOperationsToDo = true;
                } else {
                    // Null taskID is passed in because the generateExportMaskRemoveVolumesWorkflow will fill it in
                    ExportTaskCompleter completer = new ExportRemoveVolumesOnAdoptedMaskCompleter(exportGroupURI, mask.getId(), volumesToRemove, null);
                    _log.info(String.format("A subset of volumes will be removed from mask %s: %s. ", mask.getMaskName(), Joiner.on(",").join(volumesToRemove)));
                    List<? extends BlockObject> boList = BlockObject.fetchAll(_dbClient, volumesToRemove);
                    if (mask.hasAnyExistingInitiators()) {
                        errorMessage.append(String.format("A subset of volumes will be removed from mask %s: %s. This will affect the %s initiators", mask.getMaskName(), Joiner.on(", ").join(Collections2.transform(boList, CommonTransformerFunctions.fctnDataObjectToForDisplay())), mask.getExistingInitiators()));
                    }
                    List<URI> maskInitiatorURIs = Lists.newArrayList(Collections2.transform(ExportMaskUtils.getInitiatorsForExportMask(_dbClient, mask, null), CommonTransformerFunctions.fctnDataObjectToID()));
                    generateDeviceSpecificRemoveVolumesWorkflow(workflow, stepMap.get(entry.getKey()), exportGroup, mask, storage, volumesToRemove, maskInitiatorURIs, completer);
                    anyOperationsToDo = true;
                }
            }
        }
        if (errorMessage != null && !errorMessage.toString().isEmpty()) {
            _log.warn("Error Message {}", errorMessage);
        }
        if (isValidationNeeded && StringUtils.hasText(errorMessage)) {
            throw DeviceControllerException.exceptions.removeInitiatorValidationError(Joiner.on(", ").join(initiatorNames), storage.getLabel(), errorMessage.toString());
        }
        if (anyOperationsToDo) {
            String successMessage = String.format("Successfully removed exports for initiators on StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception e) {
        _log.error("ExportGroup remove initiator Orchestration failed.", e);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : ExportTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter) HashSet(java.util.HashSet) Set(java.util.Set) StringSet(com.emc.storageos.db.client.model.StringSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) URI(java.net.URI) BlockStorageDevice(com.emc.storageos.volumecontroller.BlockStorageDevice) Initiator(com.emc.storageos.db.client.model.Initiator) ArrayList(java.util.ArrayList) List(java.util.List) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) HashSet(java.util.HashSet) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportRemoveVolumesOnAdoptedMaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportRemoveVolumesOnAdoptedMaskCompleter) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) HashMap(java.util.HashMap) Map(java.util.Map) StringSetMap(com.emc.storageos.db.client.model.StringSetMap) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask)
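
Two details in Example 76 are worth isolating: validation findings are only accumulated into a StringBuffer, and the operation fails only when orchestration-level validation is enabled and at least one finding was recorded. Below is a minimal sketch of that gate, using only calls that appear above; the class and method names are hypothetical and not part of CoprHD.

import com.emc.storageos.db.client.model.ExportMask;
import com.emc.storageos.exceptions.DeviceControllerException;
import com.google.common.base.Joiner;

final class RemoveInitiatorValidationSketch {

    // Records a human-readable finding when a mask carries volumes that ViPR did not create.
    static void checkMaskForExistingVolumes(ExportMask mask, StringBuffer errorMessage) {
        if (mask.hasAnyExistingVolumes()) {
            errorMessage.append(String.format("Mask %s has existing volumes %s. ",
                    mask.forDisplay(),
                    Joiner.on(", ").join(mask.getExistingVolumes().keySet())));
        }
    }

    // Fails the operation only when validation is enabled and at least one finding exists,
    // mirroring the isValidationNeeded / StringUtils.hasText(errorMessage) gate above.
    static void failIfInvalid(boolean validationEnabled, StringBuffer errorMessage,
            String initiatorNames, String arrayLabel) {
        if (validationEnabled && errorMessage.length() > 0) {
            throw DeviceControllerException.exceptions.removeInitiatorValidationError(
                    initiatorNames, arrayLabel, errorMessage.toString());
        }
    }
}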

Example 77 with StorageSystem

use of com.emc.storageos.db.client.model.StorageSystem in project coprhd-controller by CoprHD.

the class AbstractBasicMaskingOrchestrator method exportGroupDelete.

@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        String previousStep = null;
        boolean someOperationDone = false;
        logExportGroup(exportGroup, storageURI);
        if (!ExportMaskUtils.getExportMasks(_dbClient, exportGroup).isEmpty() && !exportGroup.getInactive()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token);
            List<ExportMask> exportMasksToZoneDelete = new ArrayList<ExportMask>();
            List<ExportMask> exportMasksToZoneRemoveVolumes = new ArrayList<ExportMask>();
            Set<URI> volumesToZoneRemoveVolumes = new HashSet<URI>();
            List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup);
            for (ExportMask exportMask : exportMasks) {
                taskCompleter.setMask(exportMask.getId());
                _log.info(String.format("Checking mask %s", exportMask.getMaskName()));
                if (!exportMask.getInactive() && exportMask.getStorageDevice().equals(storageURI)) {
                    exportMask = getDevice().refreshExportMask(storage, exportMask);
                    Collection<URI> volumeURIs = Collections2.transform(exportGroup.getVolumes().keySet(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
                    // One way to know if we should delete the mask is if all of the volumes in the mask
                    // are represented in the export group.
                    boolean deleteEntireMask = removingLastExportMaskVolumes(exportMask, new ArrayList<>(volumeURIs));
                    _log.info("deleteEntireMask for {}? {}", exportMask.getId(), deleteEntireMask);
                    Set<URI> volumesToRemove = new HashSet<>();
                    if (exportGroup.getInitiators() != null && !exportGroup.getInitiators().isEmpty()) {
                        Set<String> egInitiators = new HashSet<String>(exportGroup.getInitiators());
                        for (String initiatorIdStr : egInitiators) {
                            Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorIdStr));
                            if (initiator == null) {
                                _log.warn("Found that initiator " + initiatorIdStr + " in the export group is no longer in the database, removing from the initiator list.");
                                exportGroup.removeInitiator(URI.create(initiatorIdStr));
                                _dbClient.updateObject(exportGroup);
                                continue;
                            }
                            // Search for this initiator in another export group
                            List<ExportGroup> exportGroupList = ExportUtils.getInitiatorExportGroups(initiator, _dbClient);
                            // We cannot remove initiator from mask if the mask has existing volumes
                            if (exportMask.hasUserInitiator(URI.create(initiatorIdStr)) && !exportMask.hasAnyExistingVolumes()) {
                                // If this initiator also belongs to other export groups and the mask is shared,
                                // it is best to just leave that initiator alone.
                                if ((exportGroupList != null && exportGroupList.size() > 1) && ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), null)) {
                                    _log.info(String.format("Found that my initiator is in %s more export groups, so we shouldn't remove it from the mask", exportGroupList.size() - 1));
                                    deleteEntireMask = false;
                                }
                            }
                        }
                    }
                    if (deleteEntireMask) {
                        _log.info(String.format("export_delete: export mask %s was either created by system or last volume is being removed.", exportMask.getMaskName()));
                        exportMasksToZoneDelete.add(exportMask);
                        someOperationDone = true;
                    } else {
                        // Volume removal -- check to see if that volume is already in another export group with that initiator.
                        for (String volumeIdStr : exportGroup.getVolumes().keySet()) {
                            URI egVolumeID = URI.create(volumeIdStr);
                            BlockObject bo = Volume.fetchExportMaskBlockObject(_dbClient, egVolumeID);
                            if (bo != null && exportMask.hasUserCreatedVolume(bo.getId())) {
                                if (exportGroup.getInitiators() != null) {
                                    for (String initiatorIdStr : exportGroup.getInitiators()) {
                                        if (exportMask.hasInitiator(initiatorIdStr)) {
                                            Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorIdStr));
                                            List<ExportGroup> exportGroupList2 = ExportUtils.getInitiatorVolumeExportGroups(initiator, egVolumeID, _dbClient);
                                            if (exportGroupList2 != null && exportGroupList2.size() > 1) {
                                                _log.info(String.format("Found that my volume %s is in another export group with this initiator %s, so we shouldn't remove it from the mask", volumeIdStr, initiator.getInitiatorPort()));
                                            } else {
                                                _log.info(String.format("We can remove volume %s from mask %s", volumeIdStr, exportMask.getMaskName()));
                                                volumesToRemove.add(egVolumeID);
                                            }
                                        } else if (exportMask.getCreatedBySystem()) {
                                            _log.info(String.format("Export Mask %s does not contain initiator %s, so we will not modify this export mask", exportMask.getId().toString(), initiatorIdStr));
                                        } else {
                                            // We're in a case where there are no user-added initiators for this *existing* mask.
                                            // So, we should be able to remove any of the volumes that we added to the system.
                                            volumesToRemove.add(egVolumeID);
                                        }
                                    }
                                }
                            }
                        }
                        // Remove-volume steps are generated based on the volumes we collected for removal.
                        if (!volumesToRemove.isEmpty()) {
                            _log.info(String.format("Mask %s, Removing volumes %s only", exportMask.getMaskName(), Joiner.on(',').join(volumesToRemove)));
                            _log.info(String.format("volumes in mask: %s", Joiner.on(',').join(exportMask.getVolumes().entrySet())));
                            exportMasksToZoneRemoveVolumes.add(exportMask);
                            volumesToZoneRemoveVolumes.addAll(volumesToRemove);
                            List<URI> maskInitiatorURIs = Lists.newArrayList(Collections2.transform(ExportMaskUtils.getInitiatorsForExportMask(_dbClient, exportMask, null), CommonTransformerFunctions.fctnDataObjectToID()));
                            previousStep = generateDeviceSpecificRemoveVolumesWorkflow(workflow, previousStep, exportGroup, exportMask, storage, new ArrayList<URI>(volumesToRemove), maskInitiatorURIs, null);
                            someOperationDone = true;
                        }
                    }
                }
            }
            if (!exportMasksToZoneDelete.isEmpty()) {
                for (ExportMask exportMask : exportMasksToZoneDelete) {
                    List<URI> volumeURIs = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
                    List<URI> maskInitiatorURIs = Lists.newArrayList(Collections2.transform(ExportMaskUtils.getInitiatorsForExportMask(_dbClient, exportMask, null), CommonTransformerFunctions.fctnDataObjectToID()));
                    previousStep = generateDeviceSpecificExportMaskDeleteWorkflow(workflow, previousStep, exportGroup, exportMask, volumeURIs, maskInitiatorURIs, storage);
                }
                // CTRL-8506 - VNX StorageGroup cannot be deleted because of a race condition with
                // the zoning. This is a live host test case. So, some initiators are still logged
                // in by the time ViPR tries to delete the StorageGroup.
                // General Solution:
                // When we have to delete ExportMask, we'll un-zone first so that any initiators
                // that are possibly logged into the array get a chance to log out. That way, there
                // should not be any problems with removing the ExportMask off the array.
                // 
                // COP-24183: Reversing the order with serialization to prevent DU if mask validation fails.
                previousStep = generateDeviceSpecificZoningDeleteWorkflow(workflow, previousStep, exportGroup, exportMasksToZoneDelete);
            }
            if (!exportMasksToZoneRemoveVolumes.isEmpty()) {
                // Remove all the indicated volumes from the indicated
                // export masks.
                generateDeviceSpecificZoningRemoveVolumesWorkflow(workflow, previousStep, exportGroup, exportMasksToZoneRemoveVolumes, new ArrayList<URI>(volumesToZoneRemoveVolumes));
            }
            String successMessage = String.format("Successfully removed export on StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        }
        if (!someOperationDone) {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Initiator(com.emc.storageos.db.client.model.Initiator) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) HashSet(java.util.HashSet)
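
Example 77 keeps a mask alive (deleteEntireMask = false) whenever one of the export group's initiators is also used by other export groups and the mask itself is shared. Read in isolation, that test is a small predicate; the sketch below assumes it lives inside the same orchestrator class (so _dbClient and the ExportUtils helpers shown above are available), and the method name is hypothetical.

// Returns true when the initiator belongs to more than one export group and the
// export mask is shared, i.e. the case where Example 77 refuses to delete the mask.
private boolean isInitiatorSharedAcrossGroups(Initiator initiator, ExportMask exportMask) {
    List<ExportGroup> exportGroupList = ExportUtils.getInitiatorExportGroups(initiator, _dbClient);
    return exportGroupList != null && exportGroupList.size() > 1
            && ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), null);
}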

Example 78 with StorageSystem

use of com.emc.storageos.db.client.model.StorageSystem in project coprhd-controller by CoprHD.

the class AbstractMaskingFirstOrchestrator method exportGroupCreate.

/**
 * Create storage level masking components to support the requested
 * ExportGroup object. This operation will be flexible enough to take into
 * account initiators that already exist in some
 * StorageGroup. In such a case, the underlying masking component will be
 * "adopted" by the ExportGroup. Further operations against the "adopted"
 * mask will only allow for addition and removal of those initiators/volumes
 * that were added by a Bourne request. Existing initiators/volumes will be
 * maintained.
 *
 * @param storageURI - URI referencing underlying storage array
 * @param exportGroupURI - URI referencing Bourne-level masking, ExportGroup
 * @param initiatorURIs - List of Initiator URIs
 * @param volumeMap - Map of Volume URIs to the requested Integer HLU for each volume
 * @param token - Identifier for operation
 * @throws Exception
 */
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            _log.info("export_create: initiator list non-empty");
            createWorkFlowAndSubmitForExportGroupCreate(initiatorURIs, volumeMap, token, taskCompleter, device, exportGroup, storage);
        } else {
            _log.info("export_create: initiator list is empty");
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, DeviceControllerException.errors.vmaxExportGroupCreateError(dex.getMessage()));
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) BlockStorageDevice(com.emc.storageos.volumecontroller.BlockStorageDevice) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) WorkflowException(com.emc.storageos.workflow.WorkflowException) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
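
For orientation, a hypothetical caller of exportGroupCreate passes the storage system and export group URIs, the initiator list, and a map from volume URI to the requested HLU. Everything below is placeholder data; orchestrator is assumed to be a concrete masking orchestrator instance, and -1 is the conventional "let the array assign the HLU" value in ViPR.

import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

// Placeholder URIs; in a running system these come from the database/API layer.
URI storageURI = URI.create("urn:storageos:StorageSystem:example-array:vdc1");
URI exportGroupURI = URI.create("urn:storageos:ExportGroup:example-group:vdc1");
List<URI> initiatorURIs = Arrays.asList(URI.create("urn:storageos:Initiator:example-initiator:vdc1"));

// Map each volume to its requested HLU; -1 asks the array to pick one.
Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
volumeMap.put(URI.create("urn:storageos:Volume:example-volume:vdc1"), -1);

String token = UUID.randomUUID().toString();
orchestrator.exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);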

Example 79 with StorageSystem

use of com.emc.storageos.db.client.model.StorageSystem in project coprhd-controller by CoprHD.

the class StoragePortService method updateStoragePort.

/**
 * Updates Network for the storage port with the passed
 * id and/or updates the virtual arrays to which the storage
 * port is assigned.
 * <p>
 * A port's network is used to determine to which initiators the port can be exported. It also determines the port's virtual arrays when
 * the port is not explicitly assigned to virtual arrays (see {@link StoragePort#getAssignedVirtualArrays()}). In this case the port's
 * virtual arrays are the same as its network's virtual arrays (see {@link StoragePort#getConnectedVirtualArrays()}). Implicit virtual
 * arrays cannot be removed, they can only be overridden by an explicit assignment or automatically unassigned when the network is
 * unassigned from a virtual array. A port's effective virtual array assignment is {@link StoragePort#getTaggedVirtualArrays()}).
 * <p>
 * A port can be explicitly assigned to virtual arrays and this overrides the implicit assignment resulting from the network
 * association. If the explicit assignment is removed, the implicit assignment becomes effective again.
 * <p>
 * Managing ports' virtual array assignments requires planning. In general, networks need not be assigned to virtual arrays unless
 * implicit assignments of ports are desired.
 *
 * @param id the URN of a ViPR storage port.
 * @param storagePortUpdates Specifies the updates to be made to the storage
 *            port
 *
 * @brief Update storage port network and/or virtual array assignments.
 * @return A StoragePortRestRep specifying the updated storage port info.
 */
@PUT
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}")
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
public StoragePortRestRep updateStoragePort(@PathParam("id") URI id, StoragePortUpdate storagePortUpdates) {
    // Get the storage port with the passed id.
    ArgValidator.checkFieldUriType(id, StoragePort.class, "id");
    StoragePort storagePort = queryResource(id);
    _log.info("Update called for storage port {}", id);
    // If the port is a VPLEX, then before any changes are
    // made for the port, get the storage pools for the systems
    // connected to the VPLEX. These pools and the vpools they
    // match may be impacted by the change to the VPLEX storage
    // port. We must get these pools now before any changes are
    // persisted for the port as the connected systems may
    // change and we would not get all potentially impacted pools.
    List<StoragePool> modifiedPools = null;
    URI systemURI = storagePort.getStorageDevice();
    StorageSystem system = _dbClient.queryObject(StorageSystem.class, systemURI);
    if (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType())) {
        modifiedPools = StoragePoolAssociationHelper.getStoragePoolsFromPorts(_dbClient, Arrays.asList(storagePort), null, true);
    }
    // Get the old network as part of storage port.
    URI oldNetworkId = storagePort.getNetwork();
    // Update the storage port network assignment.
    URI newNetworkId = storagePortUpdates.getNetwork();
    _log.info("Checking for updates to storage port network.");
    boolean networkUpdated = updateStoragePortNetwork(storagePort, newNetworkId);
    if (networkUpdated) {
        _log.info("Storage port network has been modified.");
        // No need to update pool connectivity because the call to network service handles that
        // Get the updated reference.
        storagePort = queryResource(id);
    }
    // Update the storage port virtual array assignments.
    _log.info("Checking for updates to storage port virtual array assignments.");
    boolean virtualArraysUpdated = updateStoragePortVirtualArrays(storagePort, storagePortUpdates.getVarrayChanges());
    /**
     * This is applicable only to a Cinder storage system's port,
     * as currently there is no API to discover it from Cinder.
     * So, it requires the user to update the value for provisioning operations.
     */
    boolean portNetworkIdUpdated = updatePortNetworkId(storagePort, storagePortUpdates.getPortNetworkId());
    // For a VPLEX port, recompute the potentially impacted storage pool
    // associations when a storage port is modified.
    if (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType())) {
        List<StoragePool> pools = StoragePoolAssociationHelper.getStoragePoolsFromPorts(_dbClient, Arrays.asList(storagePort), null, true);
        if ((modifiedPools == null) || (modifiedPools.isEmpty())) {
            modifiedPools = pools;
        } else {
            List<StoragePool> poolsToAdd = new ArrayList<StoragePool>();
            for (StoragePool pool : pools) {
                URI poolURI = pool.getId();
                boolean poolFound = false;
                for (StoragePool modifiedPool : modifiedPools) {
                    if (poolURI.equals(modifiedPool.getId())) {
                        poolFound = true;
                        break;
                    }
                }
                if (!poolFound) {
                    poolsToAdd.add(pool);
                }
            }
            modifiedPools.addAll(poolsToAdd);
        }
    }
    if (networkUpdated || portNetworkIdUpdated) {
        _log.info("Storage port was moved to other network.");
        // this method runs standard procedure for poolmatcher, rp connectivity
        StoragePortAssociationHelper.runUpdatePortAssociationsProcess(Collections.singleton(storagePort), null, _dbClient, _coordinator, modifiedPools);
    } else if (virtualArraysUpdated) {
        _log.info("Storage port virtual arrays have been modified.");
        // this method runs optimized procedure for poolmatcher, rp connectivity
        StoragePortAssociationHelper.runUpdatePortAssociationsProcessForVArrayChange(storagePort, _dbClient, _coordinator, modifiedPools, storagePortUpdates.getVarrayChanges());
    }
    // Update the virtual nas virtual arrays with network virtual arrays!!!
    if (DiscoveredDataObject.Type.vnxfile.name().equals(system.getSystemType()) || DiscoveredDataObject.Type.isilon.name().equals(system.getSystemType())) {
        Network newNetwork = null;
        boolean removePort = false;
        if (networkUpdated) {
            if (!NullColumnValueGetter.isNullURI(newNetworkId)) {
                _log.info("New network {} specified for vNAS storage port ", newNetworkId);
                // Validate the new network exists and is active.
                newNetwork = _dbClient.queryObject(Network.class, newNetworkId);
            } else if (!NullColumnValueGetter.isNullURI(oldNetworkId)) {
                _log.info("Removing network {} from vNAS storage port ", oldNetworkId);
                // Retrieve the old network so the port can be removed from it.
                newNetwork = _dbClient.queryObject(Network.class, oldNetworkId);
                removePort = true;
            }
            // Update the virtual nas virtual array assignments.
            _log.info("Checking for updates to virtual nas virtual array assignments.");
            boolean vNasVirtualArraysUpdated = updatevNasVirtualArrays(storagePort, newNetwork, storagePortUpdates.getVarrayChanges(), removePort);
        }
    }
    // If anything was updated, record an audit log entry and a storage port update
    // event.
    if (networkUpdated || virtualArraysUpdated || portNetworkIdUpdated) {
        // Create the audit log entry.
        auditOp(OperationTypeEnum.UPDATE_STORAGE_PORT, true, null, storagePort.getLabel(), id.toString());
        // Record the storage port update event.
        recordStoragePortEvent(OperationTypeEnum.STORAGE_PORT_UPDATE, STORAGEPORT_UPDATED_DESCRIPTION, storagePort.getId());
    }
    return MapStoragePort.getInstance(_dbClient).toStoragePortRestRep(storagePort);
}
Also used : StoragePool(com.emc.storageos.db.client.model.StoragePool) Network(com.emc.storageos.db.client.model.Network) MapStoragePort(com.emc.storageos.api.mapper.functions.MapStoragePort) StoragePort(com.emc.storageos.db.client.model.StoragePort) ArrayList(java.util.ArrayList) URI(java.net.URI) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) PUT(javax.ws.rs.PUT) CheckPermission(com.emc.storageos.security.authorization.CheckPermission)
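
The nested loops near the end of Example 79 merge the freshly computed pools into modifiedPools while skipping pools that are already present, i.e. a de-duplication keyed on pool URI. An equivalent formulation using a map keyed by pool id is sketched below; mergePoolsById is a hypothetical helper, not CoprHD API.

import java.net.URI;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import com.emc.storageos.db.client.model.StoragePool;

// Merges two pool lists, keeping the first occurrence of each pool id and preserving order.
static List<StoragePool> mergePoolsById(List<StoragePool> first, List<StoragePool> second) {
    Map<URI, StoragePool> byId = new LinkedHashMap<URI, StoragePool>();
    for (StoragePool pool : first) {
        byId.put(pool.getId(), pool);
    }
    for (StoragePool pool : second) {
        // Keep the pool already recorded from the first list, if any.
        if (!byId.containsKey(pool.getId())) {
            byId.put(pool.getId(), pool);
        }
    }
    return new ArrayList<StoragePool>(byId.values());
}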

Example 80 with StorageSystem

use of com.emc.storageos.db.client.model.StorageSystem in project coprhd-controller by CoprHD.

the class StorageProviderService method addStorageSystem.

/**
 * Allows the user to add a storage system and rescans the provider.
 * After the rescan, the corresponding provider should pick this system up and add it back to its list of managed systems.
 *
 * @param id the URN of a ViPR storage provider
 * @param param The storage system details.
 *
 * @brief Add a new storage system and rescan the provider.
 * @return An asynchronous task corresponding to the scan job scheduled for the provider.
 *
 * @throws BadRequestException When the system type is not valid or a
 *             storage system with the same native guid already exists.
 * @throws com.emc.storageos.db.exceptions.DatabaseException When an error occurs querying the database.
 * @throws ControllerException When an error occurs discovering the storage
 *             system.
 */
@PUT
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.SYSTEM_ADMIN })
@Path("/{id}/storage-systems")
public TaskResourceRep addStorageSystem(@PathParam("id") URI id, StorageSystemProviderRequestParam param) throws ControllerException {
    TaskResourceRep taskRep;
    URIQueryResultList list = new URIQueryResultList();
    ArgValidator.checkFieldNotEmpty(param.getSystemType(), "system_type");
    if (!StorageSystem.Type.isProviderStorageSystem(param.getSystemType())) {
        throw APIException.badRequests.cannotAddStorageSystemTypeToStorageProvider(param.getSystemType());
    }
    StorageProvider provider = _dbClient.queryObject(StorageProvider.class, id);
    ArgValidator.checkEntityNotNull(provider, id, isIdEmbeddedInURL(id));
    ArgValidator.checkFieldNotEmpty(param.getSerialNumber(), "serialNumber");
    String nativeGuid = NativeGUIDGenerator.generateNativeGuid(param.getSystemType(), param.getSerialNumber());
    // check for duplicate StorageSystem.
    List<StorageSystem> systems = CustomQueryUtility.getActiveStorageSystemByNativeGuid(_dbClient, nativeGuid);
    if (systems != null && !systems.isEmpty()) {
        throw APIException.badRequests.invalidParameterProviderStorageSystemAlreadyExists("nativeGuid", nativeGuid);
    }
    int cleared = DecommissionedResource.removeDecommissionedFlag(_dbClient, nativeGuid, StorageSystem.class);
    if (cleared > 0) {
        log.info("Cleared {} decommissioned systems", cleared);
    } else {
        log.info("Did not find any decommissioned systems to clear. Continue to scan.");
    }
    ArrayList<AsyncTask> tasks = new ArrayList<AsyncTask>(1);
    String taskId = UUID.randomUUID().toString();
    tasks.add(new AsyncTask(StorageProvider.class, provider.getId(), taskId));
    BlockController controller = getController(BlockController.class, provider.getInterfaceType());
    DiscoveredObjectTaskScheduler scheduler = new DiscoveredObjectTaskScheduler(_dbClient, new ScanJobExec(controller));
    TaskList taskList = scheduler.scheduleAsyncTasks(tasks);
    return taskList.getTaskList().listIterator().next();
}
Also used : BlockController(com.emc.storageos.volumecontroller.BlockController) TaskList(com.emc.storageos.model.TaskList) AsyncTask(com.emc.storageos.volumecontroller.AsyncTask) ArrayList(java.util.ArrayList) TaskResourceRep(com.emc.storageos.model.TaskResourceRep) DiscoveredObjectTaskScheduler(com.emc.storageos.api.service.impl.resource.utils.DiscoveredObjectTaskScheduler) MapStorageProvider(com.emc.storageos.api.mapper.functions.MapStorageProvider) StorageProvider(com.emc.storageos.db.client.model.StorageProvider) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) Path(javax.ws.rs.Path) Consumes(javax.ws.rs.Consumes) Produces(javax.ws.rs.Produces) PUT(javax.ws.rs.PUT) CheckPermission(com.emc.storageos.security.authorization.CheckPermission)
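
The end of Example 80 shows the ViPR pattern for kicking off an asynchronous provider scan: wrap the provider in an AsyncTask, hand it to a DiscoveredObjectTaskScheduler backed by a ScanJobExec, and return the first task representation. It is extracted as a sketch below; the helper is assumed to live inside StorageProviderService (so getController, _dbClient, and ScanJobExec are available), and the method name is hypothetical.

// Schedules a scan of the given provider and returns the task tracking it.
private TaskResourceRep scheduleProviderScan(StorageProvider provider) {
    ArrayList<AsyncTask> tasks = new ArrayList<AsyncTask>(1);
    String taskId = UUID.randomUUID().toString();
    // One async task per provider; the scheduler turns it into a controller scan job.
    tasks.add(new AsyncTask(StorageProvider.class, provider.getId(), taskId));
    BlockController controller = getController(BlockController.class, provider.getInterfaceType());
    DiscoveredObjectTaskScheduler scheduler =
            new DiscoveredObjectTaskScheduler(_dbClient, new ScanJobExec(controller));
    TaskList taskList = scheduler.scheduleAsyncTasks(tasks);
    return taskList.getTaskList().listIterator().next();
}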

Aggregations

StorageSystem (com.emc.storageos.db.client.model.StorageSystem)1088 URI (java.net.URI)581 ArrayList (java.util.ArrayList)424 DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException)319 Volume (com.emc.storageos.db.client.model.Volume)299 InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException)272 DatabaseException (com.emc.storageos.db.exceptions.DatabaseException)258 ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError)246 NamedURI (com.emc.storageos.db.client.model.NamedURI)243 WorkflowException (com.emc.storageos.workflow.WorkflowException)233 ControllerException (com.emc.storageos.volumecontroller.ControllerException)231 HashMap (java.util.HashMap)172 StoragePool (com.emc.storageos.db.client.model.StoragePool)159 BaseCollectionException (com.emc.storageos.plugins.BaseCollectionException)158 StringSet (com.emc.storageos.db.client.model.StringSet)156 URISyntaxException (java.net.URISyntaxException)145 List (java.util.List)139 IOException (java.io.IOException)136 URIQueryResultList (com.emc.storageos.db.client.constraint.URIQueryResultList)127 Workflow (com.emc.storageos.workflow.Workflow)126