Example 56 with ExportGroup

use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.

the class BlockDeviceExportController method updateVolumePathParams.

@Override
public void updateVolumePathParams(URI volumeURI, URI newVpoolURI, String opId) throws ControllerException {
    _log.info("Received request to update Volume path parameters. Creating master workflow.");
    VolumeVpoolChangeTaskCompleter taskCompleter = null;
    Volume volume = null;
    try {
        // Read the volume from the database, update its Vpool to the new Vpool, and create the task completer.
        volume = _dbClient.queryObject(Volume.class, volumeURI);
        URI oldVpoolURI = volume.getVirtualPool();
        List<URI> rollbackList = new ArrayList<URI>();
        List<Volume> updatedVolumes = new ArrayList<Volume>();
        rollbackList.add(volumeURI);
        // Check if it is a VPlex volume, and get backend volumes
        Volume backendSrc = VPlexUtil.getVPLEXBackendVolume(volume, true, _dbClient, false);
        if (backendSrc != null) {
            // Change the back end volume's vpool too
            backendSrc.setVirtualPool(newVpoolURI);
            rollbackList.add(backendSrc.getId());
            updatedVolumes.add(backendSrc);
            // VPlex volume, check if it is distributed
            Volume backendHa = VPlexUtil.getVPLEXBackendVolume(volume, false, _dbClient, false);
            if (backendHa != null && backendHa.getVirtualPool() != null && backendHa.getVirtualPool().toString().equals(oldVpoolURI.toString())) {
                backendHa.setVirtualPool(newVpoolURI);
                rollbackList.add(backendHa.getId());
                updatedVolumes.add(backendHa);
            }
        }
        // The VolumeVpoolChangeTaskCompleter will restore the old Virtual Pool in the event of an error.
        taskCompleter = new VolumeVpoolChangeTaskCompleter(rollbackList, oldVpoolURI, opId);
        volume.setVirtualPool(newVpoolURI);
        updatedVolumes.add(volume);
        _log.info(String.format("Changing VirtualPool PathParams for volume %s (%s) from %s to %s", volume.getLabel(), volume.getId(), oldVpoolURI, newVpoolURI));
        _dbClient.updateObject(updatedVolumes);
    } catch (Exception ex) {
        _log.error("Unexpected exception reading volume or generating taskCompleter: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volumeURI, opId);
        completer.error(_dbClient, serviceError);
    }
    try {
        Workflow workflow = _wfUtils.newWorkflow("updateVolumePathParams", false, opId);
        // Locate all the ExportMasks containing the given volume, and their Export Group.
        Map<ExportMask, ExportGroup> maskToGroupMap = ExportUtils.getExportMasks(volume, _dbClient);
        Map<URI, StringSetMap> maskToZoningMap = new HashMap<URI, StringSetMap>();
        // Store the original zoning maps of the export masks to be used to restore in case of a failure
        for (ExportMask mask : maskToGroupMap.keySet()) {
            maskToZoningMap.put(mask.getId(), mask.getZoningMap());
        }
        taskCompleter.setMaskToZoningMap(maskToZoningMap);
        // Acquire all necessary locks for the workflow:
        // For each export group lock initiator's hosts and storage array keys.
        List<URI> initiatorURIs = new ArrayList<URI>();
        for (ExportGroup exportGroup : maskToGroupMap.values()) {
            initiatorURIs.addAll(StringSetUtil.stringSetToUriList(exportGroup.getInitiators()));
            List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), initiatorURIs, volume.getStorageController());
            initiatorURIs.clear();
            boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
            if (!acquiredLocks) {
                throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "UpdateVolumePathParams: " + volume.getLabel());
            }
        }
        // These steps are serialized, which is required in case an ExportMask appears
        // in multiple Export Groups.
        String stepId = null;
        for (ExportGroup exportGroup : maskToGroupMap.values()) {
            stepId = _wfUtils.generateExportChangePathParams(workflow, "changePathParams", stepId, volume.getStorageController(), exportGroup.getId(), volumeURI);
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateVolumePathParams workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("Unexpected exception: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) StringSetMap(com.emc.storageos.db.client.model.StringSetMap) HashMap(java.util.HashMap) VolumeWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) ExportMask(com.emc.storageos.db.client.model.ExportMask) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) IOException(java.io.IOException) LockRetryException(com.emc.storageos.locking.LockRetryException) VolumeVpoolChangeTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeVpoolChangeTaskCompleter) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Volume(com.emc.storageos.db.client.model.Volume)
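
The example above captures everything it would need to roll back (the old Vpool URI and each mask's original zoning map) before mutating anything, and hands that state to the task completer so it can be restored on error. As a rough illustration of that snapshot-before-mutate bookkeeping in isolation, here is a minimal, hypothetical sketch using only plain Java collections; the Resource type and all names are invented for the example and are not part of the CoprHD API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for a mutable DB object such as a Volume or ExportMask.
class Resource {
    final String id;
    String attribute;
    Resource(String id, String attribute) { this.id = id; this.attribute = attribute; }
}

public class SnapshotBeforeMutate {
    public static void main(String[] args) {
        Map<String, Resource> db = new HashMap<>();
        db.put("vol-1", new Resource("vol-1", "old-vpool"));

        // 1. Snapshot the state needed for rollback before touching anything.
        Map<String, String> rollback = new HashMap<>();
        for (Resource r : db.values()) {
            rollback.put(r.id, r.attribute);
        }

        try {
            // 2. Mutate.
            db.get("vol-1").attribute = "new-vpool";
            // Simulated downstream failure:
            throw new IllegalStateException("workflow step failed");
        } catch (Exception e) {
            // 3. Restore from the snapshot, as the task completer would on error.
            for (Map.Entry<String, String> old : rollback.entrySet()) {
                db.get(old.getKey()).attribute = old.getValue();
            }
        }
        System.out.println(db.get("vol-1").attribute); // prints "old-vpool"
    }
}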

Example 57 with ExportGroup

use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.

the class MaskingWorkflowEntryPoints method doExportMaskZoningMapUpdate.

/**
 * Zoning map update entry point
 */
public void doExportMaskZoningMapUpdate(URI exportGroupURI, URI storageURI, String token) throws ControllerException {
    _log.info("START - doExportMaskZoningMapUpdate");
    WorkflowStepCompleter.stepExecuting(token);
    try {
        ExportGroup eg = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, eg);
        // Only the first ExportMask is needed here, so fetch the 0th entry
        if (!exportMasks.isEmpty()) {
            ExportMask mask = exportMasks.get(0);
            _blockScheduler.updateZoningMap(mask, eg.getVirtualArray(), exportGroupURI);
        }
        WorkflowStepCompleter.stepSucceded(token);
    } catch (final InternalException e) {
        _log.error("Encountered an exception", e);
        WorkflowStepCompleter.stepFailed(token, e);
    } catch (final Exception e) {
        _log.error("Encountered an exception", e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        WorkflowStepCompleter.stepFailed(token, serviceError);
    }
    _log.info("END - doExportMaskZoningMapUpdate");
}
Also used : ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportMask(com.emc.storageos.db.client.model.ExportMask) WorkflowException(com.emc.storageos.workflow.WorkflowException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException)
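
The zoning-map update step follows a common step lifecycle: mark the workflow step as executing, do the work, mark it succeeded, and report failure through separate catch blocks for internal versus unexpected exceptions. The following is a rough, hypothetical template of that control flow only; StepTracker and KnownException are invented names, not the WorkflowStepCompleter API.

// Hypothetical types, for illustration only.
class KnownException extends RuntimeException {
    KnownException(String msg) { super(msg); }
}

class StepTracker {
    void executing(String token) { System.out.println("executing " + token); }
    void succeeded(String token) { System.out.println("succeeded " + token); }
    void failed(String token, Throwable t) { System.out.println("failed " + token + ": " + t.getMessage()); }
}

public class StepLifecycle {
    private final StepTracker tracker = new StepTracker();

    public void runStep(String token, Runnable work) {
        tracker.executing(token);
        try {
            work.run();
            tracker.succeeded(token);
        } catch (KnownException e) {
            // Known failures already carry their own error details; report them directly.
            tracker.failed(token, e);
        } catch (Exception e) {
            // Anything else is wrapped as a generic job failure before reporting.
            tracker.failed(token, new RuntimeException("job failed", e));
        }
    }

    public static void main(String[] args) {
        new StepLifecycle().runStep("token-1", () -> System.out.println("updating zoning map"));
    }
}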

Example 58 with ExportGroup

use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.

the class ScaleIOMaskingOrchestrator method exportGroupCreate.

@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);
            // Create a mapping of ExportMasks to add volumes to, or collect
            // initiators that need new ExportMasks created for them
            Map<URI, Map<URI, Integer>> exportMaskToVolumesToAdd = new HashMap<>();
            List<URI> newInitiators = new ArrayList<>();
            List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
            for (Initiator initiator : initiators) {
                List<ExportMask> exportMasks = ExportUtils.getInitiatorExportMasks(initiator, storage.getId(), _dbClient);
                if (exportMasks == null || exportMasks.isEmpty()) {
                    newInitiators.add(initiator.getId());
                } else {
                    for (ExportMask exportMask : exportMasks) {
                        exportMaskToVolumesToAdd.put(exportMask.getId(), volumeMap);
                    }
                }
            }
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, newInitiators);
            log.info(String.format("Need to create ExportMasks for these compute resources %s", Joiner.on(',').join(computeResourceToInitiators.entrySet())));
            // Create new ExportMasks for these compute resources, since
            // there aren't any already existing ExportMasks for them
            for (Map.Entry<String, List<URI>> toCreate : computeResourceToInitiators.entrySet()) {
                generateExportMaskCreateWorkflow(workflow, null, storage, exportGroup, toCreate.getValue(), volumeMap, token);
            }
            log.info(String.format("Need to add volumes for these ExportMasks %s", exportMaskToVolumesToAdd.entrySet()));
            // Add the volumes to the already existing ExportMasks.
            for (Map.Entry<URI, Map<URI, Integer>> toAddVolumes : exportMaskToVolumesToAdd.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toAddVolumes.getKey());
                generateExportMaskAddVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, toAddVolumes.getValue(), null);
            }
            String successMessage = String.format("ExportGroup successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupCreate", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupCreate", ex.getMessage()));
    }
}
Also used : HashMap(java.util.HashMap) ExportMask(com.emc.storageos.db.client.model.ExportMask) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Initiator(com.emc.storageos.db.client.model.Initiator) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
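
exportGroupCreate partitions the requested initiators into two buckets: initiators with no existing ExportMask on the array (a new mask will be created per compute resource) and initiators whose existing masks only need the volumes added. A minimal, hypothetical sketch of that partitioning with plain Java collections follows; the masksFor lookup and all identifiers are invented for illustration.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class PartitionInitiators {
    public static void main(String[] args) {
        // Hypothetical lookup: initiator -> export masks it already belongs to on this array.
        Map<String, List<String>> existingMasks = new HashMap<>();
        existingMasks.put("init-A", List.of("mask-1"));
        existingMasks.put("init-B", List.of());      // no mask yet
        Function<String, List<String>> masksFor = i -> existingMasks.getOrDefault(i, List.of());

        Map<String, String> volumeMap = Map.of("vol-1", "HLU-0");

        List<String> newInitiators = new ArrayList<>();                         // need brand new masks
        Map<String, Map<String, String>> maskToVolumesToAdd = new HashMap<>();  // reuse existing masks

        for (String initiator : existingMasks.keySet()) {
            List<String> masks = masksFor.apply(initiator);
            if (masks.isEmpty()) {
                newInitiators.add(initiator);
            } else {
                for (String mask : masks) {
                    maskToVolumesToAdd.put(mask, volumeMap);
                }
            }
        }
        System.out.println("create masks for: " + newInitiators);
        System.out.println("add volumes to:   " + maskToVolumesToAdd.keySet());
    }
}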

Example 59 with ExportGroup

use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.

the class ScaleIOMaskingOrchestrator method exportGroupRemoveVolumes.

@Override
public void exportGroupRemoveVolumes(URI storageURI, URI exportGroupURI, List<URI> volumeURIs, String token) throws Exception {
    /*
         * foreach volume in list
         * foreach initiator in ExportGroup
         * if volume not used in another ExportGroup with same initiator
         * scli unmap --volume volid --sdc initiator.sdcid
         */
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (masks != null && !masks.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveVolumes", true, token);
            // Generate a list of Initiators
            List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
            Map<URI, List<URI>> exportToRemoveVolumesList = new HashMap<>();
            // Generate a mapping of volume URIs to the number of
            // ExportGroups that each volume is associated with
            Map<URI, Map<URI, Integer>> exportMaskToVolumeCount = ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, initiatorURIs);
            // A volume that is in only one ExportGroup is safe to
            // remove from that ExportMask
            for (ExportMask exportMask : masks) {
                Map<URI, Integer> volumeToCountMap = exportMaskToVolumeCount.get(exportMask.getId());
                if (volumeToCountMap == null) {
                    continue;
                }
                for (Map.Entry<URI, Integer> it : volumeToCountMap.entrySet()) {
                    URI volumeURI = it.getKey();
                    Integer numberOfExportGroupsVolumesIsIn = it.getValue();
                    if (numberOfExportGroupsVolumesIsIn == 1) {
                        List<URI> volumesToRemove = exportToRemoveVolumesList.get(exportMask.getId());
                        if (volumesToRemove == null) {
                            volumesToRemove = new ArrayList<>();
                            exportToRemoveVolumesList.put(exportMask.getId(), volumesToRemove);
                        }
                        volumesToRemove.add(volumeURI);
                    }
                }
            }
            // generate a step to remove the volumes from the ExportMask
            for (Map.Entry<URI, List<URI>> entry : exportToRemoveVolumesList.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, entry.getKey());
                log.info(String.format("Adding step to remove volumes %s from ExportMask %s", Joiner.on(',').join(entry.getValue()), exportMask.getMaskName()));
                generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, entry.getValue(), null, null);
            }
            String successMessage = String.format("ExportGroup remove volumes successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveVolumes", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveVolumes", ex.getMessage()));
    }
}
Also used : HashMap(java.util.HashMap) ExportMask(com.emc.storageos.db.client.model.ExportMask) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
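
exportGroupRemoveVolumes builds its ExportMask-to-volumes map with an explicit get-or-create-list idiom (fetch the list, create and put it if null, then add). A slightly more compact equivalent uses Map.computeIfAbsent from the standard library; the sketch below uses hypothetical string keys instead of URIs.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupVolumesByMask {
    public static void main(String[] args) {
        // Hypothetical input rows: (maskId, volumeId, numberOfExportGroupsTheVolumeIsIn)
        String[][] rows = {
                {"mask-1", "vol-1", "1"},
                {"mask-1", "vol-2", "2"},   // shared with another ExportGroup, so keep it
                {"mask-2", "vol-3", "1"},
        };

        Map<String, List<String>> maskToVolumesToRemove = new HashMap<>();
        for (String[] row : rows) {
            if (Integer.parseInt(row[2]) == 1) {
                // computeIfAbsent replaces the explicit "get, if null create and put" dance.
                maskToVolumesToRemove.computeIfAbsent(row[0], k -> new ArrayList<>()).add(row[1]);
            }
        }
        // e.g. {mask-1=[vol-1], mask-2=[vol-3]} (HashMap iteration order may vary)
        System.out.println(maskToVolumesToRemove);
    }
}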

Example 60 with ExportGroup

use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.

the class ScaleIOMaskingOrchestrator method exportGroupRemoveInitiators.

@Override
public void exportGroupRemoveInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    /*
         * foreach ScaleIO volume in ExportGroup
         * foreach initiator in list
         * if volume not used in another ExportGroup with same initiator
         * scli unmap --volume volid --sdc initiator.sdcid
         */
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        if (initiatorURIs != null && !initiatorURIs.isEmpty() && exportGroup != null && exportGroup.getExportMasks() != null) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveInitiators", true, token);
            // Create a mapping of ExportMask URI to initiators to remove
            Map<URI, List<URI>> exportToInitiatorsToRemove = new HashMap<>();
            Map<URI, List<URI>> exportToVolumesToRemove = new HashMap<>();
            Map<URI, Integer> volumeMap = null;
            List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup);
            for (ExportMask exportMask : exportMasks) {
                if (exportMask == null) {
                    continue;
                }
                for (URI initiatorURI : initiatorURIs) {
                    Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
                    if (initiator == null || !exportMask.hasInitiator(initiatorURI.toString())) {
                        continue;
                    }
                    if (ExportUtils.getInitiatorExportGroups(initiator, _dbClient).size() == 1) {
                        List<URI> initiators = exportToInitiatorsToRemove.get(exportGroupURI);
                        if (initiators == null) {
                            initiators = new ArrayList<>();
                            exportToInitiatorsToRemove.put(exportMask.getId(), initiators);
                        }
                        initiators.add(initiatorURI);
                    } else {
                        if (volumeMap == null) {
                            volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
                        }
                        List<URI> volumeURIs = exportToVolumesToRemove.get(exportGroupURI);
                        if (volumeURIs == null) {
                            volumeURIs = new ArrayList<>();
                            exportToVolumesToRemove.put(exportMask.getId(), volumeURIs);
                        }
                        for (URI volumeURI : volumeMap.keySet()) {
                            // Only add to the remove list for the ExportMask if
                            // the EM is not being shared with another ExportGroup
                            Integer count = ExportUtils.getNumberOfExportGroupsWithVolume(initiator, volumeURI, _dbClient);
                            if (count == 1) {
                                volumeURIs.add(volumeURI);
                            }
                        }
                    }
                }
            }
            // Generate the remove initiators steps for the entries that were determined above
            for (Map.Entry<URI, List<URI>> toRemoveInits : exportToInitiatorsToRemove.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toRemoveInits.getKey());
                if (exportMask != null) {
                    List<URI> removeInitURIs = toRemoveInits.getValue();
                    List<String> exportMaskInitiatorURIs = new ArrayList<>(exportMask.getInitiators());
                    for (URI uri : removeInitURIs) {
                        exportMaskInitiatorURIs.remove(uri.toString());
                    }
                    if (exportMaskInitiatorURIs.isEmpty()) {
                        log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
                        generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask, null, null, null);
                    } else {
                        log.info(String.format("Adding step to remove initiators %s from ExportMask %s", Joiner.on(',').join(removeInitURIs), exportMask.getMaskName()));
                        generateExportMaskRemoveInitiatorsWorkflow(workflow, null, storage, exportGroup, exportMask, null, removeInitURIs, true);
                    }
                }
            }
            // Generate the remove-volume steps for initiators being removed
            // from an ExportGroup that contains more than one host/initiator
            for (Map.Entry<URI, List<URI>> toRemoveVols : exportToVolumesToRemove.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toRemoveVols.getKey());
                List<URI> removeVolumeURIs = toRemoveVols.getValue();
                if (exportMask != null && !removeVolumeURIs.isEmpty()) {
                    List<String> exportMaskVolumeURIs = new ArrayList<>(exportMask.getVolumes().keySet());
                    for (URI uri : removeVolumeURIs) {
                        exportMaskVolumeURIs.remove(uri.toString());
                    }
                    if (exportMaskVolumeURIs.isEmpty()) {
                        log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
                        generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask, null, null, null);
                    } else {
                        log.info(String.format("Adding step to remove volumes %s from ExportMask %s", Joiner.on(',').join(removeVolumeURIs), exportMask.getMaskName()));
                        generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, removeVolumeURIs, null, null);
                    }
                }
            }
            String successMessage = String.format("ExportGroup remove initiators successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveInitiators", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveInitiators", ex.getMessage()));
    }
}
Also used : HashMap(java.util.HashMap) ExportMask(com.emc.storageos.db.client.model.ExportMask) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Initiator(com.emc.storageos.db.client.model.Initiator) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ExportOrchestrationTask(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
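
For both initiators and volumes, exportGroupRemoveInitiators decides between deleting the whole ExportMask (when removing the members would leave it empty) and merely shrinking it. A minimal, hypothetical sketch of that decision, using plain sets in place of the CoprHD model classes:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DeleteOrShrink {
    // Returns "DELETE" if removing the members would empty the mask, otherwise "REMOVE".
    static String planFor(Set<String> maskMembers, List<String> toRemove) {
        Set<String> remaining = new HashSet<>(maskMembers);
        remaining.removeAll(toRemove);
        return remaining.isEmpty() ? "DELETE" : "REMOVE";
    }

    public static void main(String[] args) {
        Set<String> maskInitiators = new HashSet<>(List.of("init-A", "init-B"));
        System.out.println(planFor(maskInitiators, List.of("init-A")));            // REMOVE
        System.out.println(planFor(maskInitiators, List.of("init-A", "init-B")));  // DELETE
    }
}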

Aggregations

ExportGroup (com.emc.storageos.db.client.model.ExportGroup): 278
URI (java.net.URI): 206
ArrayList (java.util.ArrayList): 139
ExportMask (com.emc.storageos.db.client.model.ExportMask): 138
DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException): 111
HashMap (java.util.HashMap): 94
Initiator (com.emc.storageos.db.client.model.Initiator): 86
StorageSystem (com.emc.storageos.db.client.model.StorageSystem): 84
NamedURI (com.emc.storageos.db.client.model.NamedURI): 80
HashSet (java.util.HashSet): 70
ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError): 63
Workflow (com.emc.storageos.workflow.Workflow): 61
List (java.util.List): 59
URIQueryResultList (com.emc.storageos.db.client.constraint.URIQueryResultList): 55
BlockObject (com.emc.storageos.db.client.model.BlockObject): 49
Map (java.util.Map): 47
ExportOrchestrationTask (com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask): 44
ControllerException (com.emc.storageos.volumecontroller.ControllerException): 41
StringSet (com.emc.storageos.db.client.model.StringSet): 38
StringMap (com.emc.storageos.db.client.model.StringMap): 33