Example 1 with LockRetryException

Use of com.emc.storageos.locking.LockRetryException in project coprhd-controller by CoprHD.

The class RPDeviceController, method exportOrchestrationSteps:

/**
 * @param volumeDescriptors
 *            - Volume descriptors
 * @param rpSystemId
 *            - RP system
 * @param taskId
 *            - task ID
 * @return - True on success, false otherwise
 * @throws InternalException
 */
public boolean exportOrchestrationSteps(List<VolumeDescriptor> volumeDescriptors, URI rpSystemId, String taskId) throws InternalException {
    List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
    RPCGExportOrchestrationCompleter completer = new RPCGExportOrchestrationCompleter(volUris, taskId);
    Workflow workflow = null;
    boolean lockException = false;
    Map<URI, Set<URI>> exportGroupVolumesAdded = new HashMap<URI, Set<URI>>();
    exportGroupsCreated = new ArrayList<URI>();
    final String COMPUTE_RESOURCE_CLUSTER = "cluster";
    try {
        final String workflowKey = "rpExportOrchestration";
        if (!WorkflowService.getInstance().hasWorkflowBeenCreated(taskId, workflowKey)) {
            // Generate the Workflow.
            workflow = _workflowService.getNewWorkflow(this, EXPORT_ORCHESTRATOR_WF_NAME, true, taskId);
            // The waitFor key returned by the previous call
            String waitFor = null;
            ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, rpSystemId);
            // Get the CG Params based on the volume descriptors
            CGRequestParams params = this.getCGRequestParams(volumeDescriptors, rpSystem);
            updateCGParams(params);
            _log.info("Start adding RP Export Volumes steps....");
            // Get the RP Exports from the CGRequestParams object
            Collection<RPExport> rpExports = generateStorageSystemExportMaps(params, volumeDescriptors);
            Map<String, Set<URI>> rpSiteInitiatorsMap = getRPSiteInitiators(rpSystem, rpExports);
            // Acquire all the RP lock keys needed for export before we start assembling the export groups.
            acquireRPLockKeysForExport(taskId, rpExports, rpSiteInitiatorsMap);
            // For each RP export, find the existing export group to reuse, or create a new one.
            for (RPExport rpExport : rpExports) {
                URI storageSystemURI = rpExport.getStorageSystem();
                String internalSiteName = rpExport.getRpSite();
                URI varrayURI = rpExport.getVarray();
                List<URI> volumes = rpExport.getVolumes();
                List<URI> initiatorSet = new ArrayList<URI>();
                String rpSiteName = (rpSystem.getRpSiteNames() != null) ? rpSystem.getRpSiteNames().get(internalSiteName) : internalSiteName;
                StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
                VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayURI);
                _log.info("--------------------");
                _log.info(String.format("RP Export: StorageSystem = [%s] RPSite = [%s] VirtualArray = [%s]", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
                boolean isJournalExport = rpExport.getIsJournalExport();
                String exportGroupGeneratedName = RPHelper.generateExportGroupName(rpSystem, storageSystem, internalSiteName, varray, isJournalExport);
                // Set up the export group - we may or may not need to create it, but we need to have everything ready in case we do
                ExportGroup exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, varray, _dbClient.queryObject(Project.class, params.getProject()), 0, isJournalExport);
                // Get the initiators of the RP Cluster (all of the RPAs on one side of a configuration)
                Map<String, Map<String, String>> rpaWWNs = RPHelper.getRecoverPointClient(rpSystem).getInitiatorWWNs(internalSiteName);
                if (rpaWWNs == null || rpaWWNs.isEmpty()) {
                    throw DeviceControllerExceptions.recoverpoint.noInitiatorsFoundOnRPAs();
                }
                // Convert to initiator object
                List<Initiator> initiators = new ArrayList<Initiator>();
                for (String rpaId : rpaWWNs.keySet()) {
                    for (Map.Entry<String, String> rpaWWN : rpaWWNs.get(rpaId).entrySet()) {
                        Initiator initiator = ExportUtils.getInitiator(rpaWWN.getKey(), _dbClient);
                        initiators.add(initiator);
                    }
                }
                // We need to find and distill only those RP initiators that correspond to a
                // network of the storage system, where that network has front-end ports from
                // the storage system. In certain lab environments, it is quite possible that
                // there are two networks: one for the storage system FE ports and one for the
                // BE ports. In such configs, RP initiators will be spread across those two
                // networks. The RP controller does not care about storage system back-end
                // ports, so we will ignore those initiators that are connected to a network
                // that has only storage system back-end port connectivity.
                Map<URI, Set<Initiator>> rpNetworkToInitiatorsMap = new HashMap<URI, Set<Initiator>>();
                Set<URI> rpSiteInitiatorUris = rpSiteInitiatorsMap.get(internalSiteName);
                if (rpSiteInitiatorUris != null) {
                    for (URI rpSiteInitiatorUri : rpSiteInitiatorUris) {
                        Initiator rpSiteInitiator = _dbClient.queryObject(Initiator.class, rpSiteInitiatorUri);
                        URI rpInitiatorNetworkURI = getInitiatorNetwork(exportGroup, rpSiteInitiator);
                        if (rpInitiatorNetworkURI != null) {
                            if (rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI) == null) {
                                rpNetworkToInitiatorsMap.put(rpInitiatorNetworkURI, new HashSet<Initiator>());
                            }
                            rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI).add(rpSiteInitiator);
                            _log.info(String.format("RP Initiator [%s] found on network: [%s]", rpSiteInitiator.getInitiatorPort(), rpInitiatorNetworkURI.toASCIIString()));
                        } else {
                            _log.info(String.format("RP Initiator [%s] was not found on any network. Excluding from automated exports", rpSiteInitiator.getInitiatorPort()));
                        }
                    }
                }
                // Compute numPaths. This is how it's done:
                // We know the RP site and the Network/TransportZone it is on.
                // Determine all the storage ports for the storage array for all the networks
                // they are on.
                // Next, if we find the network for the RP site in the above list, return all
                // the storage ports corresponding to that network.
                // For RP we will try to use as many storage ports as possible.
                Map<URI, List<StoragePort>> initiatorPortMap = getInitiatorPortsForArray(rpNetworkToInitiatorsMap, storageSystemURI, varrayURI, rpSiteName);
                for (URI networkURI : initiatorPortMap.keySet()) {
                    for (StoragePort storagePort : initiatorPortMap.get(networkURI)) {
                        _log.info(String.format("Network : [%s] - Port : [%s]", networkURI.toString(), storagePort.getLabel()));
                    }
                }
                int numPaths = computeNumPaths(initiatorPortMap, varrayURI, storageSystem);
                _log.info("Total paths = " + numPaths);
                // Stems from above comment where we distill the RP network and the initiators in that network.
                List<Initiator> initiatorList = new ArrayList<Initiator>();
                for (URI rpNetworkURI : rpNetworkToInitiatorsMap.keySet()) {
                    if (initiatorPortMap.containsKey(rpNetworkURI)) {
                        initiatorList.addAll(rpNetworkToInitiatorsMap.get(rpNetworkURI));
                    }
                }
                for (Initiator initiator : initiatorList) {
                    initiatorSet.add(initiator.getId());
                }
                // See if the export group already exists
                ExportGroup exportGroupInDB = exportGroupExistsInDB(exportGroup);
                boolean addExportGroupToDB = false;
                if (exportGroupInDB != null) {
                    exportGroup = exportGroupInDB;
                    // If the export already exists, check to see if any of the volumes have
                    // already been exported. No need to re-export volumes.
                    List<URI> volumesToRemove = new ArrayList<URI>();
                    for (URI volumeURI : volumes) {
                        if (exportGroup.getVolumes() != null && !exportGroup.getVolumes().isEmpty() && exportGroup.getVolumes().containsKey(volumeURI.toString())) {
                            _log.info(String.format("Volume [%s] already exported to export group [%s], " + "it will be not be re-exported", volumeURI.toString(), exportGroup.getGeneratedName()));
                            volumesToRemove.add(volumeURI);
                        }
                    }
                    // Remove volumes if they have already been exported
                    if (!volumesToRemove.isEmpty()) {
                        volumes.removeAll(volumesToRemove);
                    }
                    // If no volumes are left to export for this export group, nothing else needs to be done here.
                    if (volumes.isEmpty()) {
                        _log.info(String.format("No volumes needed to be exported to export group [%s], continue", exportGroup.getGeneratedName()));
                        continue;
                    }
                } else {
                    addExportGroupToDB = true;
                }
                // Add volumes to the export group
                Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
                for (URI volumeID : volumes) {
                    exportGroup.addVolume(volumeID, ExportGroup.LUN_UNASSIGNED);
                    volumesToAdd.put(volumeID, ExportGroup.LUN_UNASSIGNED);
                }
                // Keep track of volumes added to export group
                if (!volumesToAdd.isEmpty()) {
                    exportGroupVolumesAdded.put(exportGroup.getId(), volumesToAdd.keySet());
                }
                // Add the compute resource (host or cluster) to the export group, if one was specified for the volume.
                if (rpExport.getComputeResource() != null) {
                    URI computeResource = rpExport.getComputeResource();
                    _log.info(String.format("RP Export: ComputeResource : %s", computeResource.toString()));
                    if (computeResource.toString().toLowerCase().contains(COMPUTE_RESOURCE_CLUSTER)) {
                        Cluster cluster = _dbClient.queryObject(Cluster.class, computeResource);
                        exportGroup.addCluster(cluster);
                    } else {
                        Host host = _dbClient.queryObject(Host.class, rpExport.getComputeResource());
                        exportGroup.addHost(host);
                    }
                }
                // Persist the export group
                if (addExportGroupToDB) {
                    exportGroup.addInitiators(initiatorSet);
                    exportGroup.setNumPaths(numPaths);
                    _dbClient.createObject(exportGroup);
                    // Keep track of newly created EGs in case of rollback
                    exportGroupsCreated.add(exportGroup.getId());
                } else {
                    _dbClient.updateObject(exportGroup);
                }
                // If the export group already exists, add the volumes to it, otherwise create a brand new
                // export group.
                StringBuilder buffer = new StringBuilder();
                buffer.append(String.format(DASHED_NEWLINE));
                if (!addExportGroupToDB) {
                    buffer.append(String.format("Adding volumes to existing Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
                    buffer.append(String.format("Export Group name is : [%s]%n", exportGroup.getGeneratedName()));
                    buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
                    buffer.append(String.format(DASHED_NEWLINE));
                    _log.info(buffer.toString());
                    waitFor = _exportWfUtils.generateExportGroupAddVolumes(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd);
                    _log.info("Added Export Group add volumes step in workflow");
                } else {
                    buffer.append(String.format("Creating new Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
                    buffer.append(String.format("Export Group name is: [%s]%n", exportGroup.getGeneratedName()));
                    buffer.append(String.format("Export Group will have these initiators: [%s]%n", Joiner.on(',').join(initiatorSet)));
                    buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
                    buffer.append(String.format(DASHED_NEWLINE));
                    _log.info(buffer.toString());
                    String exportStep = workflow.createStepId();
                    initTaskStatus(exportGroup, exportStep, Operation.Status.pending, "create export");
                    waitFor = _exportWfUtils.generateExportGroupCreateWorkflow(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd, initiatorSet);
                    _log.info("Added Export Group create step in workflow. New Export Group Id: " + exportGroup.getId());
                }
            }
            String successMessage = "Export orchestration completed successfully";
            // Finish up and execute the plan.
            // The Workflow will handle the TaskCompleter
            Object[] callbackArgs = new Object[] { volUris };
            workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
            // Mark this workflow as created/executed so we don't do it again on retry/resume
            WorkflowService.getInstance().markWorkflowBeenCreated(taskId, workflowKey);
        }
    } catch (LockRetryException ex) {
        /**
         * Added this catch block to mark the current workflow as completed, so that
         * the lock retry does not hit an exception when creating a new workflow with
         * the same task id.
         */
        _log.warn(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
        if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
            com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
            if (!wf.getCompleted()) {
                _log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
                wf.setCompleted(true);
                _dbClient.updateObject(wf);
            }
        }
        throw ex;
    } catch (Exception ex) {
        _log.error("Could not create volumes: " + volUris, ex);
        // Rollback ViPR level RP export group changes
        rpExportGroupRollback();
        if (workflow != null) {
            _workflowService.releaseAllWorkflowLocks(workflow);
        }
        String opName = ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME.getName();
        ServiceError serviceError = null;
        if (lockException) {
            serviceError = DeviceControllerException.errors.createVolumesAborted(volUris.toString(), ex);
        } else {
            serviceError = DeviceControllerException.errors.createVolumesFailed(volUris.toString(), opName, ex);
        }
        completer.error(_dbClient, _locker, serviceError);
        return false;
    }
    _log.info("End adding RP Export Volumes steps.");
    return true;
}
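
This catch-block pattern repeats in Examples 2 and 3 below: if the workflow was created but never executed, its database record is marked completed before the exception is rethrown, so the retried request can build a fresh workflow under the same task id. A minimal sketch of that shared logic pulled into a standalone helper; the class and method names here are illustrative, not CoprHD API:

import java.net.URI;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.locking.LockRetryException;

// Hypothetical helper distilling the recurring LockRetryException cleanup above.
public class LockRetryWorkflowCleanup {

    private static final Logger log = LoggerFactory.getLogger(LockRetryWorkflowCleanup.class);

    /**
     * Marks a created-but-never-executed workflow record as completed so that a
     * retry with the same task id can create a new workflow.
     */
    public static void markWorkflowCompleted(DbClient dbClient, URI workflowURI, LockRetryException ex) {
        log.warn(String.format("Lock retry exception key: %s remaining time %d",
                ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
        com.emc.storageos.db.client.model.Workflow wf =
                dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflowURI);
        if (wf != null && !wf.getCompleted()) {
            wf.setCompleted(true);
            dbClient.updateObject(wf);
        }
    }
}

The caller would still perform the in-memory checks shown above (non-null workflow, non-null workflow URI, WorkflowState.CREATED) before invoking the helper and rethrowing.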
Also used : VirtualArray(com.emc.storageos.db.client.model.VirtualArray) ProtectionSet(com.emc.storageos.db.client.model.ProtectionSet) Set(java.util.Set) HashSet(java.util.HashSet) StringSet(com.emc.storageos.db.client.model.StringSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) ProtectionSystem(com.emc.storageos.db.client.model.ProtectionSystem) RPCGExportOrchestrationCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.RPCGExportOrchestrationCompleter) Initiator(com.emc.storageos.db.client.model.Initiator) ApplicationAddVolumeList(com.emc.storageos.volumecontroller.ApplicationAddVolumeList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) List(java.util.List) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) StoragePort(com.emc.storageos.db.client.model.StoragePort) Workflow(com.emc.storageos.workflow.Workflow) Cluster(com.emc.storageos.db.client.model.Cluster) Host(com.emc.storageos.db.client.model.Host) LockRetryException(com.emc.storageos.locking.LockRetryException) AlternateIdConstraint(com.emc.storageos.db.client.constraint.AlternateIdConstraint) ContainmentConstraint(com.emc.storageos.db.client.constraint.ContainmentConstraint) Constraint(com.emc.storageos.db.client.constraint.Constraint) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) InternalServerErrorException(com.emc.storageos.svcs.errorhandling.resources.InternalServerErrorException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) LockRetryException(com.emc.storageos.locking.LockRetryException) FunctionalAPIActionFailedException_Exception(com.emc.fapiclient.ws.FunctionalAPIActionFailedException_Exception) URISyntaxException(java.net.URISyntaxException) WorkflowException(com.emc.storageos.workflow.WorkflowException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) FunctionalAPIInternalError_Exception(com.emc.fapiclient.ws.FunctionalAPIInternalError_Exception) CoordinatorException(com.emc.storageos.coordinator.exceptions.CoordinatorException) RecoverPointException(com.emc.storageos.recoverpoint.exceptions.RecoverPointException) ExportGroup(com.emc.storageos.db.client.model.ExportGroup) Project(com.emc.storageos.db.client.model.Project) CGRequestParams(com.emc.storageos.recoverpoint.requests.CGRequestParams) BlockObject(com.emc.storageos.db.client.model.BlockObject) DataObject(com.emc.storageos.db.client.model.DataObject) Map(java.util.Map) StringSetMap(com.emc.storageos.db.client.model.StringSetMap) OpStatusMap(com.emc.storageos.db.client.model.OpStatusMap) HashMap(java.util.HashMap)

Example 2 with LockRetryException

Use of com.emc.storageos.locking.LockRetryException in project coprhd-controller by CoprHD.

The class BlockOrchestrationDeviceController, method createVolumes:

/*
     * (non-Javadoc)
     * 
     * @see com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController#createVolumes(java.util.List,
     * java.lang.String)
     */
@Override
public void createVolumes(List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
    List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumes);
    VolumeCreateWorkflowCompleter completer = new VolumeCreateWorkflowCompleter(volUris, taskId, volumes);
    Workflow workflow = null;
    try {
        // Generate the Workflow.
        workflow = _workflowService.getNewWorkflow(this, CREATE_VOLUMES_WF_NAME, true, taskId);
        // The waitFor key returned by the previous call
        String waitFor = null;
        s_logger.info("Generating steps for create Volume");
        // First, call the BlockDeviceController to add its methods.
        waitFor = _blockDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumes, taskId);
        s_logger.info("Checking for SRDF steps");
        // Call the SRDFDeviceController to add its methods if there are SRDF volumes.
        waitFor = _srdfDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumes, taskId);
        s_logger.info("Checking for VPLEX steps");
        // Call the VPlexDeviceController to add its methods if there are VPLEX volumes.
        waitFor = _vplexDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumes, taskId);
        s_logger.info("Checking for RP steps");
        // Call the RPDeviceController to add its methods if there are RP protections
        waitFor = _rpDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumes, taskId);
        s_logger.info("Checking for Replica steps");
        // Call the ReplicaDeviceController to add its methods if volumes are added to CG, and the CG associated
        // with replication
        // group(s)
        waitFor = _replicaDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumes, taskId);
        // Finish up and execute the plan.
        // The Workflow will handle the TaskCompleter
        String successMessage = "Create volumes successful for: " + volUris.toString();
        Object[] callbackArgs = new Object[] { volUris };
        workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
    } catch (LockRetryException ex) {
        /**
         * Added this catch block to mark the current workflow as completed, so that
         * the lock retry does not hit an exception when creating a new workflow with
         * the same task id.
         */
        s_logger.info(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
        releaseWorkflowLocks(workflow);
        if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
            com.emc.storageos.db.client.model.Workflow wf = s_dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
            if (!wf.getCompleted()) {
                s_logger.error("Marking the status to completed for the newly created workflow {}", wf.getId());
                wf.setCompleted(true);
                s_dbClient.updateObject(wf);
            }
        }
        throw ex;
    } catch (Exception ex) {
        s_logger.error("Could not create volumes: " + volUris, ex);
        releaseWorkflowLocks(workflow);
        String opName = ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME.getName();
        ServiceError serviceError = DeviceControllerException.errors.createVolumesFailed(volUris.toString(), opName, ex);
        completer.error(s_dbClient, _locker, serviceError);
    }
}
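
createVolumes also shows the waitFor chaining idiom: each addStepsForCreateVolumes call appends its steps to the shared workflow, gated on the key returned by the previous controller, and returns the key the next controller's steps should wait on. A sketch of what one such contribution might look like; the step group name, device type string, and createStep argument list are simplified assumptions rather than the exact CoprHD signatures:

// Hypothetical addStepsForCreateVolumes contribution (createStep arguments assumed).
public String addStepsForCreateVolumes(Workflow workflow, String waitFor,
        List<VolumeDescriptor> volumeDescriptors, String taskId) {
    // Nothing for this controller to do: pass the incoming key through
    // unchanged so the next controller still waits on the right steps.
    if (volumeDescriptors == null || volumeDescriptors.isEmpty()) {
        return waitFor;
    }
    // Illustrative step group name.
    String stepGroup = "createDeviceVolumes";
    for (VolumeDescriptor descriptor : volumeDescriptors) {
        // Gate each step on waitFor so it runs only after the previous
        // controller's steps have completed.
        workflow.createStep(stepGroup,
                "Create volume " + descriptor.getVolumeURI(),
                waitFor,
                descriptor.getDeviceURI(),
                "block-device", // device type placeholder
                this.getClass(),
                new Workflow.Method("createVolumeStep", descriptor.getVolumeURI()),
                new Workflow.Method("rollbackCreateVolumeStep", descriptor.getVolumeURI()),
                workflow.createStepId());
    }
    // Hand back the group key; the orchestrator threads it into the next controller.
    return stepGroup;
}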
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) LockRetryException(com.emc.storageos.locking.LockRetryException) WorkflowException(com.emc.storageos.workflow.WorkflowException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) LockRetryException(com.emc.storageos.locking.LockRetryException) VolumeCreateWorkflowCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeCreateWorkflowCompleter) BlockObject(com.emc.storageos.db.client.model.BlockObject)

Example 3 with LockRetryException

Use of com.emc.storageos.locking.LockRetryException in project coprhd-controller by CoprHD.

The class BlockDeviceExportController, method exportGroupUpdate:

@Override
public void exportGroupUpdate(URI export, Map<URI, Integer> addedBlockObjectMap, Map<URI, Integer> removedBlockObjectMap, Set<URI> addedClusters, Set<URI> removedClusters, Set<URI> addedHosts, Set<URI> removedHosts, Set<URI> addedInitiators, Set<URI> removedInitiators, String opId) throws ControllerException {
    Map<URI, Map<URI, Integer>> addedStorageToBlockObjects = new HashMap<URI, Map<URI, Integer>>();
    Map<URI, Map<URI, Integer>> removedStorageToBlockObjects = new HashMap<URI, Map<URI, Integer>>();
    Workflow workflow = null;
    List<Workflow> workflowList = new ArrayList<>();
    try {
        computeDiffs(export, addedBlockObjectMap, removedBlockObjectMap, addedStorageToBlockObjects, removedStorageToBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters);
        // Generate a flat list of volume/snap objects that will be added
        // to the export update completer so the completer will know what
        // to add upon task completion. We need not carry the block controller
        // into the completer, so we strip that out of the map for the benefit of
        // keeping the completer simple.
        Map<URI, Integer> addedBlockObjects = new HashMap<>();
        for (URI storageUri : addedStorageToBlockObjects.keySet()) {
            addedBlockObjects.putAll(addedStorageToBlockObjects.get(storageUri));
        }
        // Generate a flat list of volume/snap objects that will be removed
        // to the export update completer so the completer will know what
        // to remove upon task completion.
        Map<URI, Integer> removedBlockObjects = new HashMap<>();
        for (URI storageUri : removedStorageToBlockObjects.keySet()) {
            removedBlockObjects.putAll(removedStorageToBlockObjects.get(storageUri));
        }
        // Construct the export update completer with exactly which objects will
        // be removed and added when it is complete.
        ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(export, addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators, addedHosts, removedHosts, addedClusters, removedClusters, opId);
        _log.info("Received request to update export group. Creating master workflow.");
        workflow = _wfUtils.newWorkflow("exportGroupUpdate", false, opId);
        _log.info("Task id {} and workflow uri {}", opId, workflow.getWorkflowURI());
        workflowList.add(workflow);
        for (URI storageUri : addedStorageToBlockObjects.keySet()) {
            _log.info("Creating sub-workflow for storage system {}", String.valueOf(storageUri));
            // TODO: Need to fix, getExportMask() returns a single mask,
            // but there could be more than one for an array and ExportGroup
            _wfUtils.generateExportGroupUpdateWorkflow(workflow, null, null, export, getExportMask(export, storageUri), addedStorageToBlockObjects.get(storageUri), removedStorageToBlockObjects.get(storageUri), new ArrayList(addedInitiators), new ArrayList(removedInitiators), storageUri, workflowList);
        }
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateExportWorkflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (LockRetryException ex) {
        /**
         * Added this catch block to mark the current workflow as completed, so that
         * the lock retry does not hit an exception when creating a new workflow with
         * the same task id.
         */
        _log.info(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
        for (Workflow workflow2 : workflowList) {
            if (workflow2 != null) {
                boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow2);
                _log.info("Release locks from workflow {} status {}", workflow2.getWorkflowURI(), status);
            }
        }
        if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
            com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
            if (!wf.getCompleted()) {
                _log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
                wf.setCompleted(true);
                _dbClient.updateObject(wf);
            }
        }
        throw ex;
    } catch (Exception ex) {
        ExportTaskCompleter taskCompleter = new ExportUpdateCompleter(export, opId);
        String message = "exportGroupUpdate caught an exception.";
        _log.error(message, ex);
        for (Workflow workflow2 : workflowList) {
            if (workflow2 != null) {
                boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow2);
                _log.info("Release locks from workflow {} status {}", workflow2.getWorkflowURI(), status);
            }
        }
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
        taskCompleter.error(_dbClient, serviceError);
    }
}
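
Note that this method tracks the master workflow and any sub-workflows in workflowList so that both catch blocks can release every lock taken. A minimal sketch of that release loop as a hypothetical private helper (not part of the actual class), using only the calls already shown above:

// Hypothetical helper mirroring the release loop in both catch blocks.
private void releaseAllWorkflowLocksQuietly(List<Workflow> workflowList) {
    for (Workflow wf : workflowList) {
        if (wf != null) {
            boolean status = _wfUtils.getWorkflowService().releaseAllWorkflowLocks(wf);
            _log.info("Release locks from workflow {} status {}", wf.getWorkflowURI(), status);
        }
    }
}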
Also used : ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) ExportTaskCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportTaskCompleter) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) URI(java.net.URI) LockRetryException(com.emc.storageos.locking.LockRetryException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) IOException(java.io.IOException) LockRetryException(com.emc.storageos.locking.LockRetryException) ExportUpdateCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportUpdateCompleter) Map(java.util.Map) HashMap(java.util.HashMap) StringSetMap(com.emc.storageos.db.client.model.StringSetMap)

Example 4 with LockRetryException

Use of com.emc.storageos.locking.LockRetryException in project coprhd-controller by CoprHD.

The class WorkflowService, method acquireWorkflowStepLocks:

/**
 * Acquires locks on behalf of a workflow step. The locks will be released at the
 * end of the step, i.e. when the step is completed. This should only be called
 * from within the executing workflow step.
 * Note that if the same lock is already held by the workflow, it will not be
 * reacquired, and will not be released until the workflow completes.
 *
 * @param stepId
 *            - Workflow step id.
 * @param lockKeys
 *            - List of lock keys to be acquired
 * @param time
 *            - Maximum wait time, 0 means poll
 * @return true if locks acquired, false otherwise
 */
public boolean acquireWorkflowStepLocks(String stepId, List<String> lockKeys, long time) {
    boolean gotLocks = false;
    try {
        Workflow workflow = null;
        try {
            workflow = loadWorkflowFromStepId(stepId);
        } catch (WorkflowException ex) {
            _log.warn("Workflow not found for stepId: " + stepId);
        }
        if (workflow == null) {
            return false;
        }
        // Default to "now" (in seconds); overwritten below if the step has a start time.
        Long stepStartTimeSeconds = System.currentTimeMillis() / MILLISECONDS_IN_SECOND;
        StepStatus stepStatus = workflow.getStepStatusMap().get(stepId);
        if (stepStatus != null && stepStatus.startTime != null) {
            stepStartTimeSeconds = stepStatus.startTime.getTime() / MILLISECONDS_IN_SECOND;
        }
        List<String> locksToAcquire = new ArrayList<String>(lockKeys);
        // Remove any locks this workflow has already acquired,
        // so as not to acquire them multiple times.
        locksToAcquire.removeAll(_ownerLocker.getLocksForOwner(workflow.getWorkflowURI().toString()));
        // Also remove all locks already acquired in this step.
        locksToAcquire.removeAll(_ownerLocker.getLocksForOwner(stepId));
        if (locksToAcquire.isEmpty()) {
            return true;
        }
        gotLocks = _ownerLocker.acquireLocks(locksToAcquire, stepId, stepStartTimeSeconds, time);
    } catch (LockRetryException ex) {
        _log.info(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
        WorkflowStepCompleter.stepQueued(stepId);
        throw ex;
    } catch (Exception ex) {
        _log.info("Exception acquiring WorkflowStep locks: ", ex);
    }
    return gotLocks;
}
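
Per the javadoc, a step that needs its own locks calls acquireWorkflowStepLocks from inside its own execution; on contention the underlying locker throws LockRetryException, which this method rethrows after re-queueing the step so it can be retried later. A hedged usage sketch from a hypothetical step method; the lock key format, step body, and error factory call are illustrative assumptions:

// Hypothetical workflow step showing the intended call pattern.
public void exportMaskCreateStep(URI storageSystemURI, String stepId) {
    // Lock key format is an assumed example; real keys come from the
    // controller's lock-key helpers.
    List<String> lockKeys = Arrays.asList("export-" + storageSystemURI);
    // time == 0 polls. Locks already held by this workflow or step are skipped,
    // and a LockRetryException from the locker propagates out after the step
    // has been re-queued (see the catch block above).
    boolean gotLocks = WorkflowService.getInstance()
            .acquireWorkflowStepLocks(stepId, lockKeys, 0);
    if (!gotLocks) {
        // Fail the step rather than proceed without the locks; the error
        // factory call is an assumed placeholder.
        WorkflowStepCompleter.stepFailed(stepId,
                DeviceControllerException.errors.jobFailedMsg("Could not acquire step locks", null));
        return;
    }
    // ... perform the locked work; the locks are released when the step completes.
    WorkflowStepCompleter.stepSucceded(stepId); // spelling as in CoprHD
}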
Also used : ArrayList(java.util.ArrayList) StepStatus(com.emc.storageos.workflow.Workflow.StepStatus) LockRetryException(com.emc.storageos.locking.LockRetryException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) LockRetryException(com.emc.storageos.locking.LockRetryException)

Aggregations

DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException): 4 uses
LockRetryException (com.emc.storageos.locking.LockRetryException): 4 uses
ControllerException (com.emc.storageos.volumecontroller.ControllerException): 4 uses
ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError): 3 uses
InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException): 3 uses
Workflow (com.emc.storageos.workflow.Workflow): 3 uses
URI (java.net.URI): 3 uses
ArrayList (java.util.ArrayList): 3 uses
BlockObject (com.emc.storageos.db.client.model.BlockObject): 2 uses
StringSetMap (com.emc.storageos.db.client.model.StringSetMap): 2 uses
DatabaseException (com.emc.storageos.db.exceptions.DatabaseException): 2 uses
WorkflowException (com.emc.storageos.workflow.WorkflowException): 2 uses
HashMap (java.util.HashMap): 2 uses
Map (java.util.Map): 2 uses
FunctionalAPIActionFailedException_Exception (com.emc.fapiclient.ws.FunctionalAPIActionFailedException_Exception): 1 use
FunctionalAPIInternalError_Exception (com.emc.fapiclient.ws.FunctionalAPIInternalError_Exception): 1 use
CoordinatorException (com.emc.storageos.coordinator.exceptions.CoordinatorException): 1 use
AlternateIdConstraint (com.emc.storageos.db.client.constraint.AlternateIdConstraint): 1 use
Constraint (com.emc.storageos.db.client.constraint.Constraint): 1 use
ContainmentConstraint (com.emc.storageos.db.client.constraint.ContainmentConstraint): 1 use