
Example 91 with Workflow

use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.

the class VnxMaskingOrchestrator method exportGroupAddVolumes.

@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        logExportGroup(exportGroup, storageURI);
        boolean anyVolumesAdded = false;
        boolean createdNewMask = false;
        if (exportGroup != null && exportGroup.getExportMasks() != null) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);
            List<ExportMask> exportMasksToZoneAddVolumes = new ArrayList<ExportMask>();
            List<URI> volumesToZoneAddVolumes = new ArrayList<URI>();
            // Add the volume to all the ExportMasks that are contained in the
            // ExportGroup. The volumes should be added only if they don't
            // already exist for the ExportMask.
            Collection<URI> initiatorURIs = Collections2.transform(exportGroup.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
            List<URI> hostURIs = new ArrayList<URI>();
            Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
            List<String> portNames = new ArrayList<String>();
            processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
            // We always want to have the full list of initiators for the hosts involved in
            // this export. This will allow the export operation to always find any
            // existing exports for a given host.
            queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);
            Map<String, Set<URI>> foundMatches = device.findExportMasks(storage, portNames, false);
            findAndUpdateFreeHLUsForClusterExport(storage, exportGroup, new ArrayList<URI>(initiatorURIs), volumeMap);
            Set<String> checkMasks = mergeWithExportGroupMaskURIs(exportGroup, foundMatches.values());
            for (String maskURIStr : checkMasks) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, URI.create(maskURIStr));
                _log.info(String.format("Checking mask %s", exportMask.getMaskName()));
                // Check for NO_VIPR. If found, avoid this mask.
                if (exportMask.getMaskName() != null && exportMask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
                    _log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", exportMask.getMaskName(), ExportUtils.NO_VIPR));
                    continue;
                }
                if (!exportMask.getInactive() && exportMask.getStorageDevice().equals(storageURI)) {
                    // Refresh the ExportMask at the BlockStorageDevice level, so that it has
                    // up-to-date info from the array
                    exportMask = device.refreshExportMask(storage, exportMask);
                    Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
                    for (URI boURI : volumeMap.keySet()) {
                        BlockObject bo = Volume.fetchExportMaskBlockObject(_dbClient, boURI);
                        if (bo != null && !exportMask.hasExistingVolume(bo.getWWN()) && !exportMask.hasUserAddedVolume(bo.getWWN())) {
                            URI thisVol = bo.getId();
                            Integer hlu = volumeMap.get(boURI);
                            volumesToAdd.put(thisVol, hlu);
                        }
                        // If the array already has this volume in the mask (added outside of ViPR),
                        // adopt it by moving it from the existing volumes to the user-created volumes
                        if (bo != null && exportMask.hasExistingVolume(bo.getWWN())) {
                            exportMask.removeFromExistingVolumes(bo);
                            exportMask.addToUserCreatedVolumes(bo);
                            _dbClient.updateObject(exportMask);
                        }
                        // Check if the requested HLU for the volume is
                        // already taken by a pre-existing volume.
                        Integer requestedHLU = volumeMap.get(boURI);
                        StringMap existingVolumesInMask = exportMask.getExistingVolumes();
                        if (existingVolumesInMask != null && requestedHLU.intValue() != ExportGroup.LUN_UNASSIGNED && !ExportGroup.LUN_UNASSIGNED_DECIMAL_STR.equals(requestedHLU.toString()) && existingVolumesInMask.containsValue(requestedHLU.toString())) {
                            ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
                            ServiceError serviceError = DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(boURI.toString(), requestedHLU.toString());
                            completer.error(_dbClient, serviceError);
                            return;
                        }
                    }
                    _log.info(String.format("Mask %s, adding volumes %s", exportMask.getMaskName(), Joiner.on(',').join(volumesToAdd.entrySet())));
                    if (volumesToAdd.size() > 0) {
                        List<URI> volumeURIs = new ArrayList<URI>();
                        volumeURIs.addAll(volumesToAdd.keySet());
                        exportMasksToZoneAddVolumes.add(exportMask);
                        volumesToZoneAddVolumes.addAll(volumeURIs);
                        // This is the list of export masks where volumes will be added
                        // some may be user-created and being 'accepted' into ViPR for
                        // the first time. Need to update zoning map
                        updateZoningMap(exportGroup, exportMask, true);
                        generateExportMaskAddVolumesWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, exportMask, volumesToAdd, null);
                        anyVolumesAdded = true;
                        // If the ExportMask is not already associated with the ExportGroup, associate it now.
                        if (!exportGroup.hasMask(exportMask.getId())) {
                            exportGroup.addExportMask(exportMask.getId());
                            _dbClient.updateAndReindexObject(exportGroup);
                        }
                    }
                }
            }
            if (!anyVolumesAdded) {
                String attachGroupSnapshot;
                // Create a new mask only if this storage system has no existing export
                // masks for the group and there are initiators for the export.
                if (!ExportMaskUtils.hasExportMaskForStorage(_dbClient, exportGroup, storageURI) && exportGroup.hasInitiators()) {
                    _log.info("No existing masks to which the requested volumes can be added. Creating a new mask");
                    List<URI> initiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
                    attachGroupSnapshot = checkForSnapshotsToCopyToTarget(workflow, storage, null, volumeMap, null);
                    Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
                    for (URI newExportMaskInitiator : initiators) {
                        Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
                        // Not all initiators have hosts, be sure to handle either case.
                        URI hostURI = initiator.getHost();
                        if (hostURI == null) {
                            hostURI = NullColumnValueGetter.getNullURI();
                        }
                        List<URI> initiatorSet = hostInitiatorMap.get(hostURI);
                        if (initiatorSet == null) {
                            initiatorSet = new ArrayList<URI>();
                            hostInitiatorMap.put(hostURI, initiatorSet);
                        }
                        initiatorSet.add(initiator.getId());
                        _log.info(String.format("host = %s, " + "initiators to add: %d, ", hostURI, hostInitiatorMap.get(hostURI).size()));
                    }
                    if (!hostInitiatorMap.isEmpty()) {
                        for (URI hostID : hostInitiatorMap.keySet()) {
                            _log.info(String.format("new export masks %s", Joiner.on(",").join(hostInitiatorMap.get(hostID))));
                            String zoningStep = workflow.createStepId();
                            GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, zoningStep, storage, exportGroup, hostInitiatorMap.get(hostID), volumeMap, token);
                            List<URI> masks = new ArrayList<URI>();
                            masks.add(result.getMaskURI());
                            generateZoningCreateWorkflow(workflow, attachGroupSnapshot, exportGroup, masks, volumeMap, zoningStep);
                        }
                        createdNewMask = true;
                    }
                }
            }
            if (!exportMasksToZoneAddVolumes.isEmpty()) {
                generateZoningAddVolumesWorkflow(workflow, null, exportGroup, exportMasksToZoneAddVolumes, volumesToZoneAddVolumes);
            }
            String successMessage = String.format("Successfully added volumes to export on StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            if (exportGroup.hasInitiators()) {
                _log.info("There are no masks for this export. Need to create anew.");
                List<URI> initiatorURIs = new ArrayList<URI>();
                for (String initiatorURIStr : exportGroup.getInitiators()) {
                    initiatorURIs.add(URI.create(initiatorURIStr));
                }
                // Invoke the export group create operation,
                // which should in turn create a workflow operations to
                // create the export for the newly added volume(s).
                exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);
                anyVolumesAdded = true;
            } else {
                _log.warn("There are no initiator for export group: " + exportGroup.getLabel());
            }
        }
        if (!anyVolumesAdded && !createdNewMask) {
            taskCompleter.ready(_dbClient);
            _log.info("No volumes pushed to array because either they already exist " + "or there were no initiators added to the export yet.");
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : StringMap(com.emc.storageos.db.client.model.StringMap) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) URI(java.net.URI) BlockStorageDevice(com.emc.storageos.volumecontroller.BlockStorageDevice) Initiator(com.emc.storageos.db.client.model.Initiator) ArrayList(java.util.ArrayList) List(java.util.List) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask)
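Stripped of the array-specific checks, each orchestrator method in these examples follows the same Workflow lifecycle: obtain a new Workflow from the workflow service, generate steps against it, execute the plan with a task completer, and fail the completer on exceptions. The sketch below condenses that pattern; it is not CoprHD source, and the generateSteps helper plus the surrounding fields are assumptions added purely for illustration.

// Minimal sketch (not CoprHD source) of the Workflow lifecycle shared by these methods.
// _workflowService, _dbClient and generateSteps(...) are assumed fields/helpers for illustration.
public void runExportOperation(URI exportGroupURI, String token) {
    ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportOperation", true, token);
        boolean createdSteps = generateSteps(workflow); // hypothetical step-generation helper
        if (createdSteps) {
            // The WorkflowExecutor fires the completer once the plan finishes.
            workflow.executePlan(completer, "Export operation completed");
        } else {
            // Nothing to do; mark the task ready immediately.
            completer.ready(_dbClient);
        }
    } catch (Exception ex) {
        ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
        completer.error(_dbClient, serviceError);
    }
}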

Example 92 with Workflow

use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.

the class VnxMaskingOrchestrator method exportGroupCreate.

/**
 * Create storage level masking components to support the requested
 * ExportGroup object. This operation is flexible enough to take into
 * account initiators that already exist in some
 * StorageGroup. In such a case, the underlying masking component will be
 * "adopted" by the ExportGroup. Further operations against the "adopted"
 * mask will only allow for addition and removal of those initiators/volumes
 * that were added by a Bourne request. Existing initiators/volumes will be
 * maintained.
 *
 * @param storageURI
 *            - URI referencing underlying storage array
 * @param exportGroupURI
 *            - URI referencing Bourne-level masking, ExportGroup
 * @param initiatorURIs
 *            - List of Initiator URIs
 * @param volumeMap
 *            - Map of Volume URIs to requested Integer HLU
 * @param token
 *            - Identifier for operation
 * @throws Exception
 */
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        logExportGroup(exportGroup, storageURI);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            _log.info("export_create: initiator list non-empty");
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);
            boolean createdSteps = determineExportGroupCreateSteps(workflow, null, device, storage, exportGroup, initiatorURIs, volumeMap, false, token);
            String zoningStep = generateDeviceSpecificZoningCreateWorkflow(workflow, EXPORT_GROUP_MASKING_TASK, exportGroup, null, volumeMap);
            if (createdSteps && null != zoningStep) {
                // Execute the plan and allow the WorkflowExecutor to fire the
                // taskCompleter.
                String successMessage = String.format("ExportGroup successfully applied for StorageArray %s", storage.getLabel());
                workflow.executePlan(taskCompleter, successMessage);
            } else {
                _log.info("export_create: no steps created.");
                taskCompleter.ready(_dbClient);
            }
        } else {
            _log.info("export_create: initiator list");
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, DeviceControllerException.errors.vmaxExportGroupCreateError(dex.getMessage()));
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) BlockStorageDevice(com.emc.storageos.volumecontroller.BlockStorageDevice) Workflow(com.emc.storageos.workflow.Workflow) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
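To make the parameter contract in the Javadoc above concrete, a hypothetical caller might look like the following. The URIs, the orchestrator reference, and the task token are made-up placeholders (assuming the usual java.util imports); each volumeMap entry carries either an explicit HLU or ExportGroup.LUN_UNASSIGNED when the array should choose.

// Hypothetical invocation of exportGroupCreate(); all identifiers below are placeholders.
URI storageURI = URI.create("urn:storageos:StorageSystem:example-array:");
URI exportGroupURI = URI.create("urn:storageos:ExportGroup:example-group:");
List<URI> initiatorURIs = Arrays.asList(
        URI.create("urn:storageos:Initiator:host1-hba0:"),
        URI.create("urn:storageos:Initiator:host1-hba1:"));
Map<URI, Integer> volumeMap = new HashMap<URI, Integer>();
volumeMap.put(URI.create("urn:storageos:Volume:vol1:"), 1); // request HLU 1 explicitly
volumeMap.put(URI.create("urn:storageos:Volume:vol2:"), ExportGroup.LUN_UNASSIGNED); // let the array assign
orchestrator.exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, taskToken);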

Example 93 with Workflow

use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.

the class XIVMaskingOrchestrator method exportGroupAddVolumes.

@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        boolean anyVolumesAdded = false;
        boolean createdNewMask = false;
        if (exportGroup != null && exportGroup.getExportMasks() != null) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);
            List<ExportMask> exportMasksToZoneAddVolumes = new ArrayList<ExportMask>();
            List<URI> volumesToZoneAddVolumes = new ArrayList<URI>();
            // Add the volume to all the ExportMasks that are contained in the
            // ExportGroup. The volumes should be added only if they don't
            // already exist for the ExportMask.
            Collection<URI> initiatorURIs = Collections2.transform(exportGroup.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
            List<URI> hostURIs = new ArrayList<URI>();
            Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
            List<String> portNames = new ArrayList<String>();
            processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
            // We always want to have the full list of initiators for the hosts involved in
            // this export. This will allow the export operation to always find any
            // existing exports for a given host.
            queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);
            Map<String, Set<URI>> foundMatches = device.findExportMasks(storage, portNames, false);
            // Need to maintain separate Export mask for Cluster and Host.
            // So remove off the Export mask not matching to the Export Group.
            filterExportMaskForGroup(exportGroup, foundMatches);
            Set<String> checkMasks = mergeWithExportGroupMaskURIs(exportGroup, foundMatches.values());
            for (String maskURIStr : checkMasks) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, URI.create(maskURIStr));
                _log.info(String.format("Checking mask %s", exportMask.getMaskName()));
                // Check for NO_VIPR. If found, avoid this mask.
                if (exportMask.getMaskName() != null && exportMask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
                    _log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", exportMask.getMaskName(), ExportUtils.NO_VIPR));
                    continue;
                }
                if (!exportMask.getInactive() && exportMask.getStorageDevice().equals(storageURI)) {
                    // Refresh the ExportMask at the BlockStorageDevice level, so that it has
                    // up-to-date info from the array
                    exportMask = device.refreshExportMask(storage, exportMask);
                    Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
                    for (URI boURI : volumeMap.keySet()) {
                        BlockObject bo = Volume.fetchExportMaskBlockObject(_dbClient, boURI);
                        if (bo != null && !exportMask.hasExistingVolume(bo.getWWN()) && !exportMask.hasUserAddedVolume(bo.getWWN())) {
                            URI thisVol = bo.getId();
                            Integer hlu = volumeMap.get(boURI);
                            volumesToAdd.put(thisVol, hlu);
                        }
                        // Check if the requested HLU for the volume is
                        // already taken by a pre-existing volume.
                        Integer requestedHLU = volumeMap.get(boURI);
                        StringMap existingVolumesInMask = exportMask.getExistingVolumes();
                        if (existingVolumesInMask != null && requestedHLU.intValue() != ExportGroup.LUN_UNASSIGNED && !ExportGroup.LUN_UNASSIGNED_DECIMAL_STR.equals(requestedHLU.toString()) && existingVolumesInMask.containsValue(requestedHLU.toString())) {
                            ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
                            ServiceError serviceError = DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(boURI.toString(), requestedHLU.toString());
                            completer.error(_dbClient, serviceError);
                            return;
                        }
                    }
                    _log.info(String.format("Mask %s, adding volumes %s", exportMask.getMaskName(), Joiner.on(',').join(volumesToAdd.entrySet())));
                    if (volumesToAdd.size() > 0) {
                        List<URI> volumeURIs = new ArrayList<URI>();
                        volumeURIs.addAll(volumesToAdd.keySet());
                        exportMasksToZoneAddVolumes.add(exportMask);
                        volumesToZoneAddVolumes.addAll(volumeURIs);
                        // Make sure the zoning map is getting updated for user-created masks
                        updateZoningMap(exportGroup, exportMask, true);
                        // Update volumeMap to find the next HLU here.
                        updateVolumeHLU(storage, initiatorURIs, volumesToAdd);
                        generateExportMaskAddVolumesWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, exportMask, volumesToAdd, null);
                        anyVolumesAdded = true;
                        // If the ExportMask is not already associated with the ExportGroup, associate it now.
                        if (!exportGroup.hasMask(exportMask.getId())) {
                            exportGroup.addExportMask(exportMask.getId());
                            _dbClient.updateAndReindexObject(exportGroup);
                        }
                    }
                }
            }
            if (!anyVolumesAdded) {
                String attachGroupSnapshot;
                // Create a new mask only if this storage system has no existing export
                // masks for the group and there are initiators for the export.
                if (!ExportMaskUtils.hasExportMaskForStorage(_dbClient, exportGroup, storageURI) && exportGroup.hasInitiators()) {
                    _log.info("No existing masks to which the requested volumes can be added. Creating a new mask");
                    List<URI> initiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
                    attachGroupSnapshot = checkForSnapshotsToCopyToTarget(workflow, storage, null, volumeMap, null);
                    Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
                    for (URI newExportMaskInitiator : initiators) {
                        Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
                        // Not all initiators have hosts, be sure to handle either case.
                        URI hostURI = initiator.getHost();
                        if (hostURI == null) {
                            hostURI = NullColumnValueGetter.getNullURI();
                        }
                        List<URI> initiatorSet = hostInitiatorMap.get(hostURI);
                        if (initiatorSet == null) {
                            initiatorSet = new ArrayList<URI>();
                            hostInitiatorMap.put(hostURI, initiatorSet);
                        }
                        initiatorSet.add(initiator.getId());
                        _log.info(String.format("host = %s, " + "initiators to add: %d, ", hostURI, hostInitiatorMap.get(hostURI).size()));
                    }
                    if (!hostInitiatorMap.isEmpty()) {
                        for (URI hostID : hostInitiatorMap.keySet()) {
                            _log.info(String.format("new export masks %s", Joiner.on(",").join(hostInitiatorMap.get(hostID))));
                            String zoningStep = workflow.createStepId();
                            GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, zoningStep, storage, exportGroup, hostInitiatorMap.get(hostID), volumeMap, token);
                            List<URI> masks = new ArrayList<URI>();
                            masks.add(result.getMaskURI());
                            generateZoningCreateWorkflow(workflow, attachGroupSnapshot, exportGroup, masks, volumeMap, zoningStep);
                        }
                        createdNewMask = true;
                    }
                }
            }
            if (!exportMasksToZoneAddVolumes.isEmpty()) {
                generateZoningAddVolumesWorkflow(workflow, null, exportGroup, exportMasksToZoneAddVolumes, volumesToZoneAddVolumes);
            }
            String successMessage = String.format("Successfully added volumes to export on StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            if (exportGroup.hasInitiators()) {
                _log.info("There are no masks for this export. Need to create anew.");
                List<URI> initiatorURIs = new ArrayList<URI>();
                for (String initiatorURIStr : exportGroup.getInitiators()) {
                    initiatorURIs.add(URI.create(initiatorURIStr));
                }
                // Invoke the export group create operation,
                // which should in turn create a workflow operations to
                // create the export for the newly added volume(s).
                exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);
                anyVolumesAdded = true;
            } else {
                _log.warn("There are no initiator for export group: " + exportGroup.getLabel());
            }
        }
        if (!anyVolumesAdded && !createdNewMask) {
            taskCompleter.ready(_dbClient);
            _log.info("No volumes pushed to array because either they already exist " + "or there were no initiators added to the export yet.");
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
Also used : StringMap(com.emc.storageos.db.client.model.StringMap) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) URI(java.net.URI) BlockStorageDevice(com.emc.storageos.volumecontroller.BlockStorageDevice) Initiator(com.emc.storageos.db.client.model.Initiator) ArrayList(java.util.ArrayList) List(java.util.List) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask)
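Both exportGroupAddVolumes variants group the ExportGroup initiators by host before creating new masks, with a shared null-URI bucket for initiators that have no host. The condensed sketch below shows that grouping using computeIfAbsent in place of the explicit null check; the surrounding fields and the initiators list are assumed from the methods above.

// Sketch of the host-to-initiator grouping used before creating new export masks.
// Initiators without a host all land in a single NullColumnValueGetter.getNullURI() bucket.
Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
for (URI initiatorURI : initiators) {
    Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
    URI hostURI = (initiator.getHost() != null) ? initiator.getHost() : NullColumnValueGetter.getNullURI();
    hostInitiatorMap.computeIfAbsent(hostURI, k -> new ArrayList<URI>()).add(initiator.getId());
}
// Each host (or the null bucket) then receives its own mask-create and zoning-create steps.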

Example 94 with Workflow

use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.

the class XtremIOMaskingOrchestrator method exportGroupDelete.

@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    try {
        log.info(String.format("exportGroupDelete start - Array: %s ExportGroup: %s", storageURI.toString(), exportGroupURI.toString()));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        TaskCompleter taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        if (exportGroup == null || exportGroup.getInactive() || ExportMaskUtils.getExportMasks(_dbClient, exportGroup).isEmpty()) {
            taskCompleter.ready(_dbClient);
            return;
        }
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        // Set up workflow steps.
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token);
        String previousStep = null;
        refreshExportMask(storage, getDevice(), null);
        /**
         * TODO
         * Right now, to keep orchestration simple, we decided not to share export masks across export groups.
         * However, this rule breaks an existing export test case:
         * 1. If an export mask is shared across export groups, deleting it means identifying the
         * right set of initiators and volumes to be removed from both export groups.
         */
        if (exportMasks != null && !exportMasks.isEmpty()) {
            for (ExportMask exportMask : exportMasks) {
                List<URI> initiators = StringSetUtil.stringSetToUriList(exportMask.getInitiators());
                List<URI> volumesInMask = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
                previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup, exportMask, volumesInMask, initiators, null);
            }
            previousStep = generateZoningDeleteWorkflow(workflow, previousStep, exportGroup, exportMasks);
        }
        String successMessage = String.format("Export was successfully removed from StorageArray %s", storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
        log.info(String.format("exportGroupDelete end - Array: %s ExportGroup: %s", storageURI.toString(), exportGroupURI.toString()));
    } catch (Exception e) {
        throw DeviceControllerException.exceptions.exportGroupDeleteFailed(e);
    }
}
Also used : ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) TaskCompleter(com.emc.storageos.volumecontroller.TaskCompleter) ExportTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter) URI(java.net.URI) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
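The delete method above orders its work by threading the previous step id into each generate call, so the plan removes the masks before it tears down zoning. A condensed sketch of that chaining is shown below; the variable names mirror the method above, and the assumption is that the step id passed as the second argument acts as the wait-for dependency of the generated step.

// Sketch of step chaining: each generate*Workflow call is handed the previous step id,
// so mask deletions complete before the zoning cleanup runs.
String previousStep = null;
for (ExportMask exportMask : exportMasks) {
    previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup,
            exportMask, ExportMaskUtils.getUserAddedVolumeURIs(exportMask),
            StringSetUtil.stringSetToUriList(exportMask.getInitiators()), null);
}
previousStep = generateZoningDeleteWorkflow(workflow, previousStep, exportGroup, exportMasks);
workflow.executePlan(taskCompleter, "Export delete steps queued");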

Example 95 with Workflow

use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.

the class ScaleIOMaskingOrchestrator method exportGroupRemoveVolumes.

@Override
public void exportGroupRemoveVolumes(URI storageURI, URI exportGroupURI, List<URI> volumeURIs, String token) throws Exception {
    /*
         * foreach volume in list
         * foreach initiator in ExportGroup
         * if volume not used in another ExportGroup with same initiator
         * scli unmap --volume volid --sdc initiator.sdcid
         */
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (masks != null && !masks.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveVolumes", true, token);
            // Generate a list of Initiators
            List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
            Map<URI, List<URI>> exportToRemoveVolumesList = new HashMap<>();
            // Generate a mapping of volume URIs to the # of
            // ExportGroups that it is associated with
            Map<URI, Map<URI, Integer>> exportMaskToVolumeCount = ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, initiatorURIs);
            // For each ExportMask, collect the volumes that belong to only this
            // ExportGroup so they can be removed from that ExportMask
            for (ExportMask exportMask : masks) {
                Map<URI, Integer> volumeToCountMap = exportMaskToVolumeCount.get(exportMask.getId());
                if (volumeToCountMap == null) {
                    continue;
                }
                for (Map.Entry<URI, Integer> it : volumeToCountMap.entrySet()) {
                    URI volumeURI = it.getKey();
                    Integer numberOfExportGroupsVolumesIsIn = it.getValue();
                    if (numberOfExportGroupsVolumesIsIn == 1) {
                        List<URI> volumesToRemove = exportToRemoveVolumesList.get(exportMask.getId());
                        if (volumesToRemove == null) {
                            volumesToRemove = new ArrayList<>();
                            exportToRemoveVolumesList.put(exportMask.getId(), volumesToRemove);
                        }
                        volumesToRemove.add(volumeURI);
                    }
                }
            }
            // generate a step to remove the volumes from the ExportMask
            for (Map.Entry<URI, List<URI>> entry : exportToRemoveVolumesList.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, entry.getKey());
                log.info(String.format("Adding step to remove volumes %s from ExportMask %s", Joiner.on(',').join(entry.getValue()), exportMask.getMaskName()));
                generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, entry.getValue(), null, null);
            }
            String successMessage = String.format("ExportGroup remove volumes successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveVolumes", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveVolumes", ex.getMessage()));
    }
}
Also used : HashMap(java.util.HashMap) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
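The pseudocode comment at the top of exportGroupRemoveVolumes boils down to a share-count filter: a volume is queued for removal from an ExportMask only when it is referenced by exactly one ExportGroup for the given initiators. The condensed sketch below restates that filter with computeIfAbsent; it assumes the masks list and the exportMaskToVolumeCount map produced by ExportMaskUtils.mapExportMaskToVolumeShareCount in the method above.

// Sketch of the share-count filter: only volumes used by exactly one ExportGroup are removed.
Map<URI, List<URI>> exportToRemoveVolumesList = new HashMap<URI, List<URI>>();
for (ExportMask exportMask : masks) {
    Map<URI, Integer> volumeToCountMap = exportMaskToVolumeCount.get(exportMask.getId());
    if (volumeToCountMap == null) {
        continue;
    }
    for (Map.Entry<URI, Integer> entry : volumeToCountMap.entrySet()) {
        if (entry.getValue() == 1) {
            // Safe to unmap: no other ExportGroup exports this volume to these initiators.
            exportToRemoveVolumesList.computeIfAbsent(exportMask.getId(), k -> new ArrayList<URI>()).add(entry.getKey());
        }
    }
}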

Aggregations

Workflow (com.emc.storageos.workflow.Workflow): 285 usages
URI (java.net.URI): 204 usages
DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException): 171 usages
ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError): 127 usages
StorageSystem (com.emc.storageos.db.client.model.StorageSystem): 126 usages
ControllerException (com.emc.storageos.volumecontroller.ControllerException): 126 usages
InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException): 124 usages
ArrayList (java.util.ArrayList): 123 usages
WorkflowException (com.emc.storageos.workflow.WorkflowException): 119 usages
NamedURI (com.emc.storageos.db.client.model.NamedURI): 102 usages
Volume (com.emc.storageos.db.client.model.Volume): 76 usages
TaskCompleter (com.emc.storageos.volumecontroller.TaskCompleter): 72 usages
HashMap (java.util.HashMap): 66 usages
DatabaseException (com.emc.storageos.db.exceptions.DatabaseException): 65 usages
ExportGroup (com.emc.storageos.db.client.model.ExportGroup): 61 usages
ExportMask (com.emc.storageos.db.client.model.ExportMask): 54 usages
ExportTaskCompleter (com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter): 54 usages
List (java.util.List): 54 usages
BlockObject (com.emc.storageos.db.client.model.BlockObject): 41 usages
InternalServerErrorException (com.emc.storageos.svcs.errorhandling.resources.InternalServerErrorException): 41 usages