Search in sources:

Example 1 with VolumeWorkflowCompleter

use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter in project coprhd-controller by CoprHD.

the class BlockOrchestrationDeviceController method deleteVolumes.

/*
     * (non-Javadoc)
     * 
     * @see com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController#deleteVolumes(java.util.List,
     * java.lang.String)
     */
@Override
public void deleteVolumes(List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
    List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumes);
    VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volUris, taskId);
    Workflow workflow = null;
    try {
        // Validate the volume identities before proceeding
        validator.volumeURIs(volUris, true, true, ValCk.ID, ValCk.VPLEX);
        // Generate the Workflow.
        workflow = _workflowService.getNewWorkflow(this, DELETE_VOLUMES_WF_NAME, true, taskId);
        // the wait for key returned by previous call
        String waitFor = null;
        // Call the RPDeviceController to add its methods if there are RP protections.
        waitFor = _rpDeviceController.addStepsForDeleteVolumes(workflow, waitFor, volumes, taskId);
        // Call the ReplicaDeviceController to add its methods if volumes are being removed from a CG associated with replication group(s).
        waitFor = _replicaDeviceController.addStepsForDeleteVolumes(workflow, waitFor, volumes, taskId);
        // Call the VPlexDeviceController to add its methods if there are VPLEX volumes.
        waitFor = _vplexDeviceController.addStepsForDeleteVolumes(workflow, waitFor, volumes, taskId);
        // Call the RPDeviceController to add its post-delete methods.
        waitFor = _rpDeviceController.addStepsForPostDeleteVolumes(workflow, waitFor, volumes, taskId, completer, _blockDeviceController);
        // Call the SRDFDeviceController to add its methods if there are SRDF volumes.
        waitFor = _srdfDeviceController.addStepsForDeleteVolumes(workflow, waitFor, volumes, taskId);
        // Next, call the BlockDeviceController to add its methods.
        waitFor = _blockDeviceController.addStepsForDeleteVolumes(workflow, waitFor, volumes, taskId);
        // Next, call the BlockDeviceController to add post deletion methods.
        waitFor = _blockDeviceController.addStepsForPostDeleteVolumes(workflow, waitFor, volumes, taskId, completer);
        // Call the VPlexDeviceController to add its post-delete methods.
        waitFor = _vplexDeviceController.addStepsForPostDeleteVolumes(workflow, waitFor, volumes, taskId, completer);
        // Finish up and execute the plan.
        // The Workflow will handle the TaskCompleter
        String successMessage = "Delete volumes successful for: " + volUris.toString();
        Object[] callbackArgs = new Object[] { volUris };
        workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
    } catch (Exception ex) {
        s_logger.error("Could not delete volumes: " + volUris, ex);
        releaseWorkflowLocks(workflow);
        String opName = ResourceOperationTypeEnum.DELETE_BLOCK_VOLUME.getName();
        ServiceError serviceError = DeviceControllerException.errors.deleteVolumesFailed(volUris.toString(), opName, ex);
        completer.error(s_dbClient, _locker, serviceError);
    }
}
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) VolumeWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) Workflow(com.emc.storageos.workflow.Workflow) BlockObject(com.emc.storageos.db.client.model.BlockObject) URI(java.net.URI) WorkflowException(com.emc.storageos.workflow.WorkflowException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) LockRetryException(com.emc.storageos.locking.LockRetryException)
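
The deleteVolumes() method shows the orchestration idiom used throughout this class: each device controller contributes its steps through an addStepsFor...() call and hands back a waitFor key, which is threaded into the next contributor so the generated steps run in order. The following is a minimal, self-contained sketch of that chaining pattern in plain Java; the StepContributor interface and the step names are illustrative stand-ins, not CoprHD types.

import java.util.ArrayList;
import java.util.List;

// Illustrative stand-in (not the real CoprHD Workflow API): each "controller"
// appends its steps to the plan and returns the key the next controller waits on.
interface StepContributor {
    String addSteps(List<String> plan, String waitFor);
}

public class WaitForChainSketch {
    public static void main(String[] args) {
        List<String> plan = new ArrayList<>();
        List<StepContributor> controllers = List.of(
                (p, after) -> { p.add("RP delete steps (after " + after + ")"); return "rp-delete"; },
                (p, after) -> { p.add("Replica delete steps (after " + after + ")"); return "replica-delete"; },
                (p, after) -> { p.add("Block delete steps (after " + after + ")"); return "block-delete"; });

        // Mirrors deleteVolumes(): thread the waitFor token through every contributor
        // so later steps are ordered after the steps added before them.
        String waitFor = null;
        for (StepContributor controller : controllers) {
            waitFor = controller.addSteps(plan, waitFor);
        }
        plan.forEach(System.out::println);
    }
}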

Example 2 with VolumeWorkflowCompleter

use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter in project coprhd-controller by CoprHD.

the class BlockOrchestrationDeviceController method expandVolume.

/*
     * (non-Javadoc)
     * 
     * @see com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController#expandVolume(java.util.List,
     * java.lang.String)
     */
@Override
public void expandVolume(List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
    List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumes);
    VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volUris, taskId);
    try {
        // Validate the volume identities before proceeding
        validator.volumeURIs(volUris, true, true, ValCk.ID, ValCk.VPLEX);
        // Generate the Workflow.
        Workflow workflow = _workflowService.getNewWorkflow(this, EXPAND_VOLUMES_WF_NAME, true, taskId);
        // the wait for key returned by previous call
        String waitFor = null;
        // First, call the RP controller to add methods for RP CG delete
        waitFor = _rpDeviceController.addPreVolumeExpandSteps(workflow, volumes, taskId);
        // Call the BlockDeviceController to add its methods if there are block or VPLEX backend volumes.
        waitFor = _blockDeviceController.addStepsForExpandVolume(workflow, waitFor, volumes, taskId);
        // Call the SRDFDeviceController to add its methods for SRDF Source / SRDF Target volumes.
        waitFor = _srdfDeviceController.addStepsForExpandVolume(workflow, waitFor, volumes, taskId);
        // Call the VPlexDeviceController to add its methods if there are VPLEX volumes.
        waitFor = _vplexDeviceController.addStepsForExpandVolume(workflow, waitFor, volumes, taskId);
        // Call the RPDeviceController to add its methods for post volume expand, i.e., recreate the RP CG.
        waitFor = _rpDeviceController.addPostVolumeExpandSteps(workflow, waitFor, volumes, taskId);
        // Finish up and execute the plan.
        // The Workflow will handle the TaskCompleter
        String successMessage = "Expand volume successful for: " + volUris.toString();
        Object[] callbackArgs = new Object[] { new ArrayList<URI>(volUris) };
        workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
    } catch (Exception ex) {
        s_logger.error("Could not expand volume: " + volUris, toString(), ex);
        String opName = ResourceOperationTypeEnum.EXPAND_BLOCK_VOLUME.getName();
        ServiceError serviceError = DeviceControllerException.errors.expandVolumeFailed(volUris.toString(), opName, ex);
        completer.error(s_dbClient, _locker, serviceError);
    }
}
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) VolumeWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) BlockObject(com.emc.storageos.db.client.model.BlockObject) URI(java.net.URI) WorkflowException(com.emc.storageos.workflow.WorkflowException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) LockRetryException(com.emc.storageos.locking.LockRetryException)
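
A detail worth noting in expandVolume() (and in deleteVolumes() above) is that the VolumeWorkflowCompleter is constructed before the try block, so the catch clause can always report a failure against the task, even when workflow creation itself throws; on the success path the completer is handed to executePlan() and the Workflow drives it. The sketch below reproduces that shape with simplified stand-in types; TaskCompleterSketch and its methods are assumptions for illustration, not the real CoprHD API.

import java.net.URI;
import java.util.List;

// Simplified stand-in for the completer (illustrative only); the real class is
// VolumeWorkflowCompleter, and errors are wrapped in a ServiceError first.
class TaskCompleterSketch {
    private final String taskId;
    TaskCompleterSketch(String taskId) { this.taskId = taskId; }
    void error(String message) { System.out.println("Task " + taskId + " failed: " + message); }
}

public class ExpandVolumeSketch {
    // Mirrors expandVolume(): create the completer before any work so the catch
    // block can mark the task failed no matter where the exception comes from.
    static void expandVolumes(List<URI> volUris, String taskId) {
        TaskCompleterSketch completer = new TaskCompleterSketch(taskId);
        try {
            if (volUris.isEmpty()) {
                throw new IllegalArgumentException("no volumes to expand");
            }
            // ... build the workflow here and call executePlan(completer, ...) ...
        } catch (Exception ex) {
            completer.error(ex.getMessage());
        }
    }

    public static void main(String[] args) {
        expandVolumes(List.of(), "task-42");
    }
}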

Example 3 with VolumeWorkflowCompleter

use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter in project coprhd-controller by CoprHD.

the class RPDeviceController method updateConsistencyGroupPolicy.

@Override
public void updateConsistencyGroupPolicy(URI protectionDevice, URI consistencyGroup, List<URI> volumeURIs, URI newVpoolURI, String task) throws InternalException {
    _log.info(String.format("Request to update consistency group policy for volumes %s through virtual pool change to %s", volumeURIs, newVpoolURI));
    VolumeVpoolChangeTaskCompleter taskCompleter = null;
    URI oldVpoolURI = null;
    List<Volume> volumes = new ArrayList<Volume>();
    List<Volume> vplexBackendVolumes = new ArrayList<Volume>();
    try {
        // Get all CG source volumes. The entire CG policy is being updated so we
        // need to capture the existing vpools for all the source volumes before
        // changing them.
        List<Volume> cgVolumes = RPHelper.getCgSourceVolumes(consistencyGroup, _dbClient);
        VirtualPool newVpool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
        Map<URI, URI> oldVpools = new HashMap<URI, URI>();
        for (Volume volume : cgVolumes) {
            // Save the old virtual pool
            oldVpoolURI = volume.getVirtualPool();
            oldVpools.put(volume.getId(), oldVpoolURI);
            // Update to the new virtual pool
            volume.setVirtualPool(newVpoolURI);
            volumes.add(volume);
            // If this is a VPlex volume, there will be associated backend volumes
            StringSet associatedVolumeIds = volume.getAssociatedVolumes();
            // Perform additional tasks if this volume is a VPlex volume
            if (associatedVolumeIds != null && !associatedVolumeIds.isEmpty()) {
                Volume backendSrc = null;
                Volume backendHa = null;
                for (String associatedVolumeId : associatedVolumeIds) {
                    Volume associatedVolume = _dbClient.queryObject(Volume.class, URI.create(associatedVolumeId));
                    // Assign the associated volumes to either be the source or HA
                    if (associatedVolume != null) {
                        if (associatedVolume.getVirtualArray().equals(volume.getVirtualArray())) {
                            backendSrc = associatedVolume;
                        } else {
                            backendHa = associatedVolume;
                        }
                    }
                }
                if (backendSrc != null) {
                    // Change the back end volume's vPool too
                    backendSrc.setVirtualPool(newVpoolURI);
                    vplexBackendVolumes.add(backendSrc);
                    _log.info(String.format("Changing VirtualPool for VPLEX backend source volume %s (%s) from %s to %s", backendSrc.getLabel(), backendSrc.getId(), oldVpoolURI, newVpoolURI));
                    if (backendHa != null) {
                        VirtualPool newHAVpool = VirtualPool.getHAVPool(newVpool, _dbClient);
                        if (newHAVpool == null) {
                            // it may not be set
                            newHAVpool = newVpool;
                        }
                        backendHa.setVirtualPool(newHAVpool.getId());
                        vplexBackendVolumes.add(backendHa);
                    }
                }
            }
        }
        _dbClient.updateObject(volumes);
        _dbClient.updateObject(vplexBackendVolumes);
        // The VolumeVpoolChangeTaskCompleter will restore the old Virtual Pool
        taskCompleter = new VolumeVpoolChangeTaskCompleter(volumeURIs, oldVpools, task);
    } catch (Exception ex) {
        _log.error("Unexpected exception reading volume or generating taskCompleter: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volumeURIs, task);
        completer.error(_dbClient, serviceError);
    }
    try {
        Workflow workflow = _workflowService.getNewWorkflow(this, "updateReplicationMode", false, task);
        ProtectionSystem protectionSystem = _dbClient.queryObject(ProtectionSystem.class, protectionDevice);
        if (!volumes.isEmpty()) {
            VirtualPool newVirtualPool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
            // Add workflow step
            addUpdateConsistencyGroupPolicyStep(workflow, protectionSystem, consistencyGroup, newVirtualPool.getRpCopyMode());
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateAutoTieringPolicy workflow has {} step(s). Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Updated the consistency group policy successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("Unexpected exception: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) HashMap(java.util.HashMap) VolumeWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) VirtualPool(com.emc.storageos.db.client.model.VirtualPool) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) ProtectionSystem(com.emc.storageos.db.client.model.ProtectionSystem) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) InternalServerErrorException(com.emc.storageos.svcs.errorhandling.resources.InternalServerErrorException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) LockRetryException(com.emc.storageos.locking.LockRetryException) FunctionalAPIActionFailedException_Exception(com.emc.fapiclient.ws.FunctionalAPIActionFailedException_Exception) URISyntaxException(java.net.URISyntaxException) WorkflowException(com.emc.storageos.workflow.WorkflowException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) FunctionalAPIInternalError_Exception(com.emc.fapiclient.ws.FunctionalAPIInternalError_Exception) CoordinatorException(com.emc.storageos.coordinator.exceptions.CoordinatorException) RecoverPointException(com.emc.storageos.recoverpoint.exceptions.RecoverPointException) VolumeVpoolChangeTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeVpoolChangeTaskCompleter) Volume(com.emc.storageos.db.client.model.Volume) StringSet(com.emc.storageos.db.client.model.StringSet)
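
Before updating each source volume, updateConsistencyGroupPolicy() records the volume's current vPool in an oldVpools map and hands that map to the VolumeVpoolChangeTaskCompleter, which can restore the original assignments if the workflow fails. A minimal sketch of that capture-then-overwrite step follows; the URIs are made-up placeholders and the plain maps stand in for database objects.

import java.net.URI;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: a map of volume -> previous vPool lets an error handler
// restore the original assignments, which is the role VolumeVpoolChangeTaskCompleter
// plays in the real code.
public class VpoolRollbackSketch {
    public static void main(String[] args) {
        Map<URI, URI> currentVpoolByVolume = new LinkedHashMap<>();
        currentVpoolByVolume.put(URI.create("urn:vol:1"), URI.create("urn:vpool:old"));
        currentVpoolByVolume.put(URI.create("urn:vol:2"), URI.create("urn:vpool:old"));

        URI newVpool = URI.create("urn:vpool:new");
        Map<URI, URI> oldVpools = new HashMap<>();

        // Record the old value before overwriting, exactly once per volume.
        for (Map.Entry<URI, URI> entry : currentVpoolByVolume.entrySet()) {
            oldVpools.put(entry.getKey(), entry.getValue());
            entry.setValue(newVpool);
        }

        // On failure, a rollback would simply write oldVpools back.
        System.out.println("new assignments: " + currentVpoolByVolume);
        System.out.println("rollback data:   " + oldVpools);
    }
}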

Example 4 with VolumeWorkflowCompleter

use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter in project coprhd-controller by CoprHD.

the class BlockDeviceExportController method updatePolicyAndLimits.

@Override
public void updatePolicyAndLimits(List<URI> volumeURIs, URI newVpoolURI, String opId) throws ControllerException {
    _log.info("Received request to update Auto-tiering policy. Creating master workflow.");
    VolumeVpoolAutoTieringPolicyChangeTaskCompleter taskCompleter = null;
    URI oldVpoolURI = null;
    List<Volume> volumes = new ArrayList<Volume>();
    List<Volume> vplexBackendVolumes = new ArrayList<Volume>();
    try {
        // Read volume from database, update the vPool to the new vPool
        // and update new auto tiering policy uri, and create task completer.
        volumes = _dbClient.queryObject(Volume.class, volumeURIs);
        VirtualPool newVpool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
        Map<URI, URI> oldVolToPolicyMap = new HashMap<URI, URI>();
        for (Volume volume : volumes) {
            oldVpoolURI = volume.getVirtualPool();
            volume.setVirtualPool(newVpoolURI);
            _log.info(String.format("Changing VirtualPool Auto-tiering Policy for volume %s (%s) from %s to %s", volume.getLabel(), volume.getId(), oldVpoolURI, newVpoolURI));
            oldVolToPolicyMap.put(volume.getId(), volume.getAutoTieringPolicyUri());
            updateAutoTieringPolicyUriInVolume(volume, newVpool);
            // Check if it is a VPlex volume, and get backend volumes
            Volume backendSrc = VPlexUtil.getVPLEXBackendVolume(volume, true, _dbClient, false);
            if (backendSrc != null) {
                // Change the back end volume's vPool too
                backendSrc.setVirtualPool(newVpoolURI);
                vplexBackendVolumes.add(backendSrc);
                _log.info(String.format("Changing VirtualPool Auto-tiering Policy for VPLEX backend source volume %s (%s) from %s to %s", backendSrc.getLabel(), backendSrc.getId(), oldVpoolURI, newVpoolURI));
                oldVolToPolicyMap.put(backendSrc.getId(), backendSrc.getAutoTieringPolicyUri());
                updateAutoTieringPolicyUriInVolume(backendSrc, newVpool);
                // VPlex volume, check if it is distributed
                Volume backendHa = VPlexUtil.getVPLEXBackendVolume(volume, false, _dbClient, false);
                if (backendHa != null) {
                    VirtualPool newHAVpool = VirtualPool.getHAVPool(newVpool, _dbClient);
                    if (newHAVpool == null) {
                        // it may not be set
                        newHAVpool = newVpool;
                    }
                    backendHa.setVirtualPool(newHAVpool.getId());
                    vplexBackendVolumes.add(backendHa);
                    _log.info(String.format("Changing VirtualPool Auto-tiering Policy for VPLEX backend distributed volume %s (%s) from %s to %s", backendHa.getLabel(), backendHa.getId(), oldVpoolURI, newHAVpool.getId()));
                    oldVolToPolicyMap.put(backendHa.getId(), backendHa.getAutoTieringPolicyUri());
                    updateAutoTieringPolicyUriInVolume(backendHa, newHAVpool);
                }
            }
        }
        _dbClient.updateObject(volumes);
        _dbClient.updateObject(vplexBackendVolumes);
        // The VolumeVpoolChangeTaskCompleter will restore the old Virtual Pool
        // and old auto tiering policy in event of error.
        // Assume all volumes belong to the same vPool. This should be taken care of by the BlockService API.
        taskCompleter = new VolumeVpoolAutoTieringPolicyChangeTaskCompleter(volumeURIs, oldVpoolURI, oldVolToPolicyMap, opId);
    } catch (Exception ex) {
        _log.error("Unexpected exception reading volume or generating taskCompleter: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volumeURIs, opId);
        completer.error(_dbClient, serviceError);
    }
    try {
        Workflow workflow = _wfUtils.newWorkflow("updateAutoTieringPolicy", false, opId);
        /**
         * For VMAX:
         * get corresponding export mask for each volume
         * group volumes by export mask
         * create workflow step for each export mask.
         *
         * For VNX Block:
         * Policy is set on volume during its creation.
         * Whether it is exported or not, send all volumes
         * to update StorageTierMethodology property on them.
         * Create workflow step for each storage system.
         */
        // Use backend volumes list if it is VPLEX volume
        List<Volume> volumesToUse = !vplexBackendVolumes.isEmpty() ? vplexBackendVolumes : volumes;
        // move applicable volumes from all volumes list to a separate list.
        Map<URI, List<URI>> systemToVolumeMap = getVolumesToModify(volumesToUse);
        String stepId = null;
        for (URI systemURI : systemToVolumeMap.keySet()) {
            stepId = _wfUtils.generateExportChangePolicyAndLimits(workflow, "updateAutoTieringPolicy", stepId, systemURI, null, null, systemToVolumeMap.get(systemURI), newVpoolURI, oldVpoolURI);
        }
        Map<URI, List<URI>> storageToNotExportedVolumesMap = new HashMap<URI, List<URI>>();
        Map<URI, List<URI>> exportMaskToVolumeMap = new HashMap<URI, List<URI>>();
        Map<URI, URI> maskToGroupURIMap = new HashMap<URI, URI>();
        for (Volume volume : volumesToUse) {
            // Locate all the ExportMasks containing the given volume
            Map<ExportMask, ExportGroup> maskToGroupMap = ExportUtils.getExportMasks(volume, _dbClient);
            if (maskToGroupMap.isEmpty()) {
                URI storageURI = volume.getStorageController();
                StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
                if (storage.checkIfVmax3()) {
                    if (!storageToNotExportedVolumesMap.containsKey(storageURI)) {
                        storageToNotExportedVolumesMap.put(storageURI, new ArrayList<URI>());
                    }
                    storageToNotExportedVolumesMap.get(storageURI).add(volume.getId());
                }
            }
            for (ExportMask mask : maskToGroupMap.keySet()) {
                if (!exportMaskToVolumeMap.containsKey(mask.getId())) {
                    exportMaskToVolumeMap.put(mask.getId(), new ArrayList<URI>());
                }
                exportMaskToVolumeMap.get(mask.getId()).add(volume.getId());
                maskToGroupURIMap.put(mask.getId(), maskToGroupMap.get(mask).getId());
            }
        }
        VirtualPool oldVpool = _dbClient.queryObject(VirtualPool.class, oldVpoolURI);
        for (URI exportMaskURI : exportMaskToVolumeMap.keySet()) {
            ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
            List<URI> exportMaskVolumes = exportMaskToVolumeMap.get(exportMaskURI);
            URI exportMaskNewVpool = newVpoolURI;
            URI exportMaskOldVpool = oldVpoolURI;
            Volume vol = _dbClient.queryObject(Volume.class, exportMaskVolumes.get(0));
            // all volumes are already updated with respective new vPool
            if (Volume.checkForVplexBackEndVolume(_dbClient, vol) && !newVpoolURI.equals(vol.getVirtualPool())) {
                // backend distributed volume; HA vPool set in Vplex vPool
                exportMaskNewVpool = vol.getVirtualPool();
                VirtualPool oldHAVpool = VirtualPool.getHAVPool(oldVpool, _dbClient);
                if (oldHAVpool == null) {
                    // it may not be set
                    oldHAVpool = oldVpool;
                }
                exportMaskOldVpool = oldHAVpool.getId();
            }
            stepId = _wfUtils.generateExportChangePolicyAndLimits(workflow, "updateAutoTieringPolicy", stepId, exportMask.getStorageDevice(), exportMaskURI, maskToGroupURIMap.get(exportMaskURI), exportMaskVolumes, exportMaskNewVpool, exportMaskOldVpool);
        }
        for (URI storageURI : storageToNotExportedVolumesMap.keySet()) {
            stepId = _wfUtils.generateChangeAutoTieringPolicy(workflow, "updateAutoTieringPolicyForNotExportedVMAX3Volumes", stepId, storageURI, storageToNotExportedVolumesMap.get(storageURI), newVpoolURI, oldVpoolURI);
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateAutoTieringPolicy workflow has {} step(s). Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Updated the export group on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("Unexpected exception: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
Also used : VolumeVpoolAutoTieringPolicyChangeTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeVpoolAutoTieringPolicyChangeTaskCompleter) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) HashMap(java.util.HashMap) VolumeWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) ExportMask(com.emc.storageos.db.client.model.ExportMask) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) VirtualPool(com.emc.storageos.db.client.model.VirtualPool) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) IOException(java.io.IOException) LockRetryException(com.emc.storageos.locking.LockRetryException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Volume(com.emc.storageos.db.client.model.Volume) List(java.util.List) ArrayList(java.util.ArrayList) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
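
updatePolicyAndLimits() builds several URI-keyed maps of lists (volumes per storage system, volumes per export mask) using a containsKey()/put() check before each add. The sketch below shows the same grouping step written with Map.computeIfAbsent(); the volume and mask URIs are made-up placeholders, and the sketch covers only the grouping, not the workflow-step generation.

import java.net.URI;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: group volumes by the mask (or storage system) they belong to,
// producing one List<URI> per key, as the real code does before generating steps.
public class GroupByMaskSketch {
    public static void main(String[] args) {
        Map<URI, URI> volumeToMask = new LinkedHashMap<>();
        volumeToMask.put(URI.create("urn:vol:1"), URI.create("urn:mask:a"));
        volumeToMask.put(URI.create("urn:vol:2"), URI.create("urn:mask:a"));
        volumeToMask.put(URI.create("urn:vol:3"), URI.create("urn:mask:b"));

        Map<URI, List<URI>> maskToVolumes = new LinkedHashMap<>();
        for (Map.Entry<URI, URI> entry : volumeToMask.entrySet()) {
            maskToVolumes.computeIfAbsent(entry.getValue(), k -> new ArrayList<>())
                         .add(entry.getKey());
        }
        // One workflow step would then be generated per key.
        maskToVolumes.forEach((mask, vols) -> System.out.println(mask + " -> " + vols));
    }
}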

Example 5 with VolumeWorkflowCompleter

use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter in project coprhd-controller by CoprHD.

the class BlockDeviceExportController method updateVolumePathParams.

@Override
public void updateVolumePathParams(URI volumeURI, URI newVpoolURI, String opId) throws ControllerException {
    _log.info("Received request to update Volume path parameters. Creating master workflow.");
    VolumeVpoolChangeTaskCompleter taskCompleter = null;
    Volume volume = null;
    try {
        // Read volume from database, update the vPool to the new vPool, and create the task completer.
        volume = _dbClient.queryObject(Volume.class, volumeURI);
        URI oldVpoolURI = volume.getVirtualPool();
        List<URI> rollbackList = new ArrayList<URI>();
        List<Volume> updatedVolumes = new ArrayList<Volume>();
        rollbackList.add(volumeURI);
        // Check if it is a VPlex volume, and get backend volumes
        Volume backendSrc = VPlexUtil.getVPLEXBackendVolume(volume, true, _dbClient, false);
        if (backendSrc != null) {
            // Change the back end volume's vpool too
            backendSrc.setVirtualPool(newVpoolURI);
            rollbackList.add(backendSrc.getId());
            updatedVolumes.add(backendSrc);
            // VPlex volume, check if it is distributed
            Volume backendHa = VPlexUtil.getVPLEXBackendVolume(volume, false, _dbClient, false);
            if (backendHa != null && backendHa.getVirtualPool() != null && backendHa.getVirtualPool().toString().equals(oldVpoolURI.toString())) {
                backendHa.setVirtualPool(newVpoolURI);
                rollbackList.add(backendHa.getId());
                updatedVolumes.add(backendHa);
            }
        }
        // The VolumeVpoolChangeTaskCompleter will restore the old Virtual Pool in event of error.
        taskCompleter = new VolumeVpoolChangeTaskCompleter(rollbackList, oldVpoolURI, opId);
        volume.setVirtualPool(newVpoolURI);
        updatedVolumes.add(volume);
        _log.info(String.format("Changing VirtualPool PathParams for volume %s (%s) from %s to %s", volume.getLabel(), volume.getId(), oldVpoolURI, newVpoolURI));
        _dbClient.updateObject(updatedVolumes);
    } catch (Exception ex) {
        _log.error("Unexpected exception reading volume or generating taskCompleter: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volumeURI, opId);
        completer.error(_dbClient, serviceError);
    }
    try {
        Workflow workflow = _wfUtils.newWorkflow("updateVolumePathParams", false, opId);
        // Locate all the ExportMasks containing the given volume, and their Export Group.
        Map<ExportMask, ExportGroup> maskToGroupMap = ExportUtils.getExportMasks(volume, _dbClient);
        Map<URI, StringSetMap> maskToZoningMap = new HashMap<URI, StringSetMap>();
        // Store the original zoning maps of the export masks to be used to restore in case of a failure
        for (ExportMask mask : maskToGroupMap.keySet()) {
            maskToZoningMap.put(mask.getId(), mask.getZoningMap());
        }
        taskCompleter.setMaskToZoningMap(maskToZoningMap);
        // Acquire all necessary locks for the workflow:
        // For each export group lock initiator's hosts and storage array keys.
        List<URI> initiatorURIs = new ArrayList<URI>();
        for (ExportGroup exportGroup : maskToGroupMap.values()) {
            initiatorURIs.addAll(StringSetUtil.stringSetToUriList(exportGroup.getInitiators()));
            List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), initiatorURIs, volume.getStorageController());
            initiatorURIs.clear();
            boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
            if (!acquiredLocks) {
                throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "UpdateVolumePathParams: " + volume.getLabel());
            }
        }
        // These steps are serialized, which is required in case an ExportMask appears
        // in multiple Export Groups.
        String stepId = null;
        for (ExportGroup exportGroup : maskToGroupMap.values()) {
            stepId = _wfUtils.generateExportChangePathParams(workflow, "changePathParams", stepId, volume.getStorageController(), exportGroup.getId(), volumeURI);
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateVolumePathParams workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("Unexpected exception: ", ex);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) StringSetMap(com.emc.storageos.db.client.model.StringSetMap) HashMap(java.util.HashMap) VolumeWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) ExportMask(com.emc.storageos.db.client.model.ExportMask) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) IOException(java.io.IOException) LockRetryException(com.emc.storageos.locking.LockRetryException) VolumeVpoolChangeTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeVpoolChangeTaskCompleter) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Volume(com.emc.storageos.db.client.model.Volume)
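
updateVolumePathParams() snapshots each export mask's zoning map into maskToZoningMap and passes it to the task completer so a failed workflow can restore the original zoning. The sketch below illustrates that snapshot-then-mutate pattern with plain Java collections; the nested-map layout and the defensive copy are illustrative assumptions rather than the real StringSetMap behavior.

import java.net.URI;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

// Illustrative only: take a copy of each mask's zoning data before mutating it,
// so the original state is still available for rollback if the workflow fails.
public class ZoningSnapshotSketch {
    public static void main(String[] args) {
        Map<URI, Map<String, Set<String>>> zoningByMask = new LinkedHashMap<>();
        zoningByMask.put(URI.create("urn:mask:a"),
                new HashMap<>(Map.of("initiator-1", Set.of("port-1", "port-2"))));

        // Copy the outer map and each inner map so later edits don't leak into the snapshot.
        Map<URI, Map<String, Set<String>>> snapshot = new LinkedHashMap<>();
        zoningByMask.forEach((mask, zoning) -> snapshot.put(mask, new HashMap<>(zoning)));

        // Mutate the live map as a workflow step might...
        zoningByMask.get(URI.create("urn:mask:a")).remove("initiator-1");

        // ...and the snapshot still holds the original zoning for rollback.
        System.out.println("live:     " + zoningByMask);
        System.out.println("snapshot: " + snapshot);
    }
}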

Aggregations

DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException) 8
ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError) 8
VolumeWorkflowCompleter (com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeWorkflowCompleter) 8
URI (java.net.URI) 8
Workflow (com.emc.storageos.workflow.Workflow) 7
ControllerException (com.emc.storageos.volumecontroller.ControllerException) 6
LockRetryException (com.emc.storageos.locking.LockRetryException) 5
ArrayList (java.util.ArrayList) 5
Volume (com.emc.storageos.db.client.model.Volume) 4
InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException) 4
WorkflowException (com.emc.storageos.workflow.WorkflowException) 4
HashMap (java.util.HashMap) 4
BlockObject (com.emc.storageos.db.client.model.BlockObject) 2
ExportGroup (com.emc.storageos.db.client.model.ExportGroup) 2
ExportMask (com.emc.storageos.db.client.model.ExportMask) 2
NamedURI (com.emc.storageos.db.client.model.NamedURI) 2
StorageSystem (com.emc.storageos.db.client.model.StorageSystem) 2
VirtualPool (com.emc.storageos.db.client.model.VirtualPool) 2
FCTN_STRING_TO_URI (com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_STRING_TO_URI) 2
DatabaseException (com.emc.storageos.db.exceptions.DatabaseException) 2