use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
the class RPDeviceController method exportOrchestrationSteps.
/**
* @param volumeDescriptors
* - Volume descriptors
* @param rpSystemId
* - RP system
* @param taskId
* - task ID
* @return - True on success, false otherwise
* @throws InternalException
*/
public boolean exportOrchestrationSteps(List<VolumeDescriptor> volumeDescriptors, URI rpSystemId, String taskId) throws InternalException {
List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
RPCGExportOrchestrationCompleter completer = new RPCGExportOrchestrationCompleter(volUris, taskId);
Workflow workflow = null;
boolean lockException = false;
Map<URI, Set<URI>> exportGroupVolumesAdded = new HashMap<URI, Set<URI>>();
exportGroupsCreated = new ArrayList<URI>();
final String COMPUTE_RESOURCE_CLUSTER = "cluster";
try {
final String workflowKey = "rpExportOrchestration";
if (!WorkflowService.getInstance().hasWorkflowBeenCreated(taskId, workflowKey)) {
// Generate the Workflow.
workflow = _workflowService.getNewWorkflow(this, EXPORT_ORCHESTRATOR_WF_NAME, true, taskId);
// The waitFor key returned by the previous call
String waitFor = null;
ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, rpSystemId);
// Get the CG Params based on the volume descriptors
CGRequestParams params = this.getCGRequestParams(volumeDescriptors, rpSystem);
updateCGParams(params);
_log.info("Start adding RP Export Volumes steps....");
// Get the RP Exports from the CGRequestParams object
Collection<RPExport> rpExports = generateStorageSystemExportMaps(params, volumeDescriptors);
Map<String, Set<URI>> rpSiteInitiatorsMap = getRPSiteInitiators(rpSystem, rpExports);
// Acquire all the RP lock keys needed for export before we start assembling the export groups.
acquireRPLockKeysForExport(taskId, rpExports, rpSiteInitiatorsMap);
// For each RP export, either add the volumes to an existing export group or create a new one.
for (RPExport rpExport : rpExports) {
URI storageSystemURI = rpExport.getStorageSystem();
String internalSiteName = rpExport.getRpSite();
URI varrayURI = rpExport.getVarray();
List<URI> volumes = rpExport.getVolumes();
List<URI> initiatorSet = new ArrayList<URI>();
String rpSiteName = (rpSystem.getRpSiteNames() != null) ? rpSystem.getRpSiteNames().get(internalSiteName) : internalSiteName;
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayURI);
_log.info("--------------------");
_log.info(String.format("RP Export: StorageSystem = [%s] RPSite = [%s] VirtualArray = [%s]", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
boolean isJournalExport = rpExport.getIsJournalExport();
String exportGroupGeneratedName = RPHelper.generateExportGroupName(rpSystem, storageSystem, internalSiteName, varray, isJournalExport);
// Setup the export group - we may or may not need to create it, but we need to have everything ready in case we do
ExportGroup exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, varray, _dbClient.queryObject(Project.class, params.getProject()), 0, isJournalExport);
// Get the initiators of the RP Cluster (all of the RPAs on one side of a configuration)
Map<String, Map<String, String>> rpaWWNs = RPHelper.getRecoverPointClient(rpSystem).getInitiatorWWNs(internalSiteName);
if (rpaWWNs == null || rpaWWNs.isEmpty()) {
throw DeviceControllerExceptions.recoverpoint.noInitiatorsFoundOnRPAs();
}
// Convert to initiator object
List<Initiator> initiators = new ArrayList<Initiator>();
for (String rpaId : rpaWWNs.keySet()) {
for (Map.Entry<String, String> rpaWWN : rpaWWNs.get(rpaId).entrySet()) {
Initiator initiator = ExportUtils.getInitiator(rpaWWN.getKey(), _dbClient);
initiators.add(initiator);
}
}
// We need to find and distill only those RP initiators that correspond to the network of the
// storage system, where that network has front-end ports from the storage system.
// In certain lab environments, it's quite possible that there are two networks: one for the
// storage system FE ports and one for the BE ports. In such configs, RP initiators will be
// spread across those two networks. The RP controller does not care about storage system
// back-end ports, so we ignore those initiators that are connected to a network that has
// only storage system back-end port connectivity.
Map<URI, Set<Initiator>> rpNetworkToInitiatorsMap = new HashMap<URI, Set<Initiator>>();
Set<URI> rpSiteInitiatorUris = rpSiteInitiatorsMap.get(internalSiteName);
if (rpSiteInitiatorUris != null) {
for (URI rpSiteInitiatorUri : rpSiteInitiatorUris) {
Initiator rpSiteInitiator = _dbClient.queryObject(Initiator.class, rpSiteInitiatorUri);
URI rpInitiatorNetworkURI = getInitiatorNetwork(exportGroup, rpSiteInitiator);
if (rpInitiatorNetworkURI != null) {
if (rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI) == null) {
rpNetworkToInitiatorsMap.put(rpInitiatorNetworkURI, new HashSet<Initiator>());
}
rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI).add(rpSiteInitiator);
_log.info(String.format("RP Initiator [%s] found on network: [%s]", rpSiteInitiator.getInitiatorPort(), rpInitiatorNetworkURI.toASCIIString()));
} else {
_log.info(String.format("RP Initiator [%s] was not found on any network. Excluding from automated exports", rpSiteInitiator.getInitiatorPort()));
}
}
}
// Compute numPaths. This is how it's done:
// We know the RP site and the Network/TransportZone it is on.
// Determine all the storage ports for the storage array for all the networks they are on.
// Next, if we find the network for the RP site in the above list, return all the storage ports
// corresponding to that.
// For RP we will try to use as many storage ports as possible.
Map<URI, List<StoragePort>> initiatorPortMap = getInitiatorPortsForArray(rpNetworkToInitiatorsMap, storageSystemURI, varrayURI, rpSiteName);
for (URI networkURI : initiatorPortMap.keySet()) {
for (StoragePort storagePort : initiatorPortMap.get(networkURI)) {
_log.info(String.format("Network : [%s] - Port : [%s]", networkURI.toString(), storagePort.getLabel()));
}
}
int numPaths = computeNumPaths(initiatorPortMap, varrayURI, storageSystem);
_log.info("Total paths = " + numPaths);
// Stemming from the distillation above: keep only the initiators on RP networks that also carry storage ports for this array.
List<Initiator> initiatorList = new ArrayList<Initiator>();
for (URI rpNetworkURI : rpNetworkToInitiatorsMap.keySet()) {
if (initiatorPortMap.containsKey(rpNetworkURI)) {
initiatorList.addAll(rpNetworkToInitiatorsMap.get(rpNetworkURI));
}
}
for (Initiator initiator : initiatorList) {
initiatorSet.add(initiator.getId());
}
// See if the export group already exists
ExportGroup exportGroupInDB = exportGroupExistsInDB(exportGroup);
boolean addExportGroupToDB = false;
if (exportGroupInDB != null) {
exportGroup = exportGroupInDB;
// If the export already exists, check to see if any of the volumes have already been
// exported. No need to re-export volumes.
List<URI> volumesToRemove = new ArrayList<URI>();
for (URI volumeURI : volumes) {
if (exportGroup.getVolumes() != null && !exportGroup.getVolumes().isEmpty() && exportGroup.getVolumes().containsKey(volumeURI.toString())) {
_log.info(String.format("Volume [%s] already exported to export group [%s], " + "it will be not be re-exported", volumeURI.toString(), exportGroup.getGeneratedName()));
volumesToRemove.add(volumeURI);
}
}
// Remove volumes if they have already been exported
if (!volumesToRemove.isEmpty()) {
volumes.removeAll(volumesToRemove);
}
// If no volumes are left to export to this export group, nothing else needs to be done here.
if (volumes.isEmpty()) {
_log.info(String.format("No volumes needed to be exported to export group [%s], continue", exportGroup.getGeneratedName()));
continue;
}
} else {
addExportGroupToDB = true;
}
// Add volumes to the export group
Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
for (URI volumeID : volumes) {
exportGroup.addVolume(volumeID, ExportGroup.LUN_UNASSIGNED);
volumesToAdd.put(volumeID, ExportGroup.LUN_UNASSIGNED);
}
// Keep track of volumes added to export group
if (!volumesToAdd.isEmpty()) {
exportGroupVolumesAdded.put(exportGroup.getId(), volumesToAdd.keySet());
}
// If the volume is exported to a compute resource (host or cluster), add that resource to the export group.
if (rpExport.getComputeResource() != null) {
URI computeResource = rpExport.getComputeResource();
_log.info(String.format("RP Export: ComputeResource : %s", computeResource.toString()));
if (computeResource.toString().toLowerCase().contains(COMPUTE_RESOURCE_CLUSTER)) {
Cluster cluster = _dbClient.queryObject(Cluster.class, computeResource);
exportGroup.addCluster(cluster);
} else {
Host host = _dbClient.queryObject(Host.class, rpExport.getComputeResource());
exportGroup.addHost(host);
}
}
// Persist the export group
if (addExportGroupToDB) {
exportGroup.addInitiators(initiatorSet);
exportGroup.setNumPaths(numPaths);
_dbClient.createObject(exportGroup);
// Keep track of newly created EGs in case of rollback
exportGroupsCreated.add(exportGroup.getId());
} else {
_dbClient.updateObject(exportGroup);
}
// If the export group already exists, add the volumes to it, otherwise create a brand new
// export group.
StringBuilder buffer = new StringBuilder();
buffer.append(String.format(DASHED_NEWLINE));
if (!addExportGroupToDB) {
buffer.append(String.format("Adding volumes to existing Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is : [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
waitFor = _exportWfUtils.generateExportGroupAddVolumes(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd);
_log.info("Added Export Group add volumes step in workflow");
} else {
buffer.append(String.format("Creating new Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is: [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these initiators: [%s]%n", Joiner.on(',').join(initiatorSet)));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
String exportStep = workflow.createStepId();
initTaskStatus(exportGroup, exportStep, Operation.Status.pending, "create export");
waitFor = _exportWfUtils.generateExportGroupCreateWorkflow(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd, initiatorSet);
_log.info("Added Export Group create step in workflow. New Export Group Id: " + exportGroup.getId());
}
}
String successMessage = "Export orchestration completed successfully";
// Finish up and execute the plan.
// The Workflow will handle the TaskCompleter
Object[] callbackArgs = new Object[] { volUris };
workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
// Mark this workflow as created/executed so we don't do it again on retry/resume
WorkflowService.getInstance().markWorkflowBeenCreated(taskId, workflowKey);
}
} catch (LockRetryException ex) {
/**
 * This catch block marks the current workflow as completed so that the lock retry will not hit an
 * exception when creating a new workflow with the same taskId.
 */
_log.warn(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
if (!wf.getCompleted()) {
_log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
wf.setCompleted(true);
_dbClient.updateObject(wf);
}
}
throw ex;
} catch (Exception ex) {
_log.error("Could not create volumes: " + volUris, ex);
// Rollback ViPR level RP export group changes
rpExportGroupRollback();
if (workflow != null) {
_workflowService.releaseAllWorkflowLocks(workflow);
}
String opName = ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME.getName();
ServiceError serviceError = null;
if (lockException) {
serviceError = DeviceControllerException.errors.createVolumesAborted(volUris.toString(), ex);
} else {
serviceError = DeviceControllerException.errors.createVolumesFailed(volUris.toString(), opName, ex);
}
completer.error(_dbClient, _locker, serviceError);
return false;
}
_log.info("End adding RP Export Volumes steps.");
return true;
}
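The heart of this method is the distillation step that builds rpNetworkToInitiatorsMap: only RP initiators that land on a network with front-end connectivity to the array make it into the export. A minimal standalone sketch of that grouping idiom follows; the ViPR types are reduced to plain port strings and URIs, and every name in it is illustrative, not part of the CoprHD API.
import java.net.URI;
import java.util.*;
import java.util.function.Function;
public class InitiatorDistillationSketch {
// Groups initiator ports by the network they resolve to, skipping ports that
// resolve to no network -- mirroring how rpNetworkToInitiatorsMap is built above.
static Map<URI, Set<String>> groupByNetwork(Collection<String> initiatorPorts, Function<String, URI> networkLookup) {
Map<URI, Set<String>> networkToInitiators = new HashMap<URI, Set<String>>();
for (String port : initiatorPorts) {
URI network = networkLookup.apply(port); // null => not on any network
if (network == null) {
continue; // excluded from automated exports, as in the controller
}
networkToInitiators.computeIfAbsent(network, k -> new HashSet<String>()).add(port);
}
return networkToInitiators;
}
public static void main(String[] args) {
Map<String, URI> fabric = new HashMap<String, URI>();
fabric.put("50:01:24:80:00:00:00:01", URI.create("urn:network:fe-fabric"));
fabric.put("50:01:24:80:00:00:00:02", URI.create("urn:network:fe-fabric"));
// The third port resolves to no network and is dropped.
System.out.println(groupByNetwork(Arrays.asList("50:01:24:80:00:00:00:01", "50:01:24:80:00:00:00:02", "50:01:24:80:00:00:00:03"), fabric::get));
}
}
Only networks that also appear in initiatorPortMap (i.e., carry array front-end ports) then survive into initiatorList, which keeps back-end-only fabrics out of the export.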
use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
the class RPDeviceController method addPreVolumeExpandSteps.
/**
 * RP-specific workflow steps required prior to expanding the underlying volume are added here.
 * Ex: RP CG remove replication sets.
 *
 * @param workflow
 * the Workflow being constructed
 * @param volumeDescriptors
 * the volume descriptors for the expand request
 * @param taskId
 * the top-level operation's taskId
 * @return the waitFor key (STEP_PRE_VOLUME_EXPAND), or null if no RP steps were added
 * @throws WorkflowException
 */
public String addPreVolumeExpandSteps(Workflow workflow, List<VolumeDescriptor> volumeDescriptors, String taskId) throws WorkflowException {
// Just grab a legit target volume that already has an assigned protection controller.
// This will work for all operations, adding, removing, vpool change, etc.
List<VolumeDescriptor> protectionControllerDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.RP_TARGET, VolumeDescriptor.Type.RP_VPLEX_VIRT_TARGET }, new VolumeDescriptor.Type[] {});
// If there are no RP volumes, just return
if (protectionControllerDescriptors.isEmpty()) {
return null;
}
// Grab any volume from the list so we can grab the protection system, which will be the same for all volumes.
Volume volume = _dbClient.queryObject(Volume.class, protectionControllerDescriptors.get(0).getVolumeURI());
ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
// Get only the RP volumes from the descriptors.
List<VolumeDescriptor> volumeDescriptorsTypeFilter = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.RP_SOURCE, VolumeDescriptor.Type.RP_EXISTING_SOURCE, VolumeDescriptor.Type.RP_VPLEX_VIRT_SOURCE }, new VolumeDescriptor.Type[] {});
// If there are no RP volumes, just return
if (volumeDescriptorsTypeFilter.isEmpty()) {
return null;
}
for (VolumeDescriptor descriptor : volumeDescriptorsTypeFilter) {
URI volURI = descriptor.getVolumeURI();
ProtectionSystem rp = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
Map<String, RecreateReplicationSetRequestParams> rsetParams = new HashMap<String, RecreateReplicationSetRequestParams>();
RecreateReplicationSetRequestParams rsetParam = getReplicationSettings(rpSystem, volURI);
rsetParams.put(RPHelper.getRPWWn(volURI, _dbClient), rsetParam);
String stepId = workflow.createStepId();
Workflow.Method deleteRsetExecuteMethod = new Workflow.Method(METHOD_DELETE_RSET_STEP, rpSystem.getId(), Arrays.asList(volURI));
Workflow.Method deleteRsetRollbackMethod = new Workflow.Method(METHOD_DELETE_RSET_ROLLBACK_STEP, rpSystem.getId(), Arrays.asList(volURI), rsetParams);
workflow.createStep(STEP_PRE_VOLUME_EXPAND, "Pre volume expand, delete replication set subtask for RP: " + volURI.toString(), null, rpSystem.getId(), rp.getSystemType(), this.getClass(), deleteRsetExecuteMethod, deleteRsetRollbackMethod, stepId);
_log.info("addPreVolumeExpandSteps Replication Set in workflow");
}
return STEP_PRE_VOLUME_EXPAND;
}
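The step created here pairs a destructive execute method (METHOD_DELETE_RSET_STEP) with a rollback method that recreates the replication set from the rsetParams captured beforehand. A toy sketch of that capture-before-delete pattern, with plain maps standing in for the RecoverPoint settings; nothing below is a CoprHD API.
import java.util.HashMap;
import java.util.Map;
public class StepWithRollbackSketch {
public static void main(String[] args) {
Map<String, String> rsets = new HashMap<String, String>();
rsets.put("wwn-1", "settings-1");
// Capture the replication set settings before the destructive step,
// as getReplicationSettings(...) does above.
Map<String, String> rsetParams = new HashMap<String, String>(rsets);
try {
rsets.clear(); // execute: delete the replication set
throw new RuntimeException("expand failed mid-flight"); // simulated failure
} catch (RuntimeException ex) {
rsets.putAll(rsetParams); // rollback: recreate from the captured params
}
System.out.println(rsets); // {wwn-1=settings-1}
}
}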
use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
the class RPDeviceController method updateConsistencyGroupPolicy.
@Override
public void updateConsistencyGroupPolicy(URI protectionDevice, URI consistencyGroup, List<URI> volumeURIs, URI newVpoolURI, String task) throws InternalException {
_log.info(String.format("Request to update consistency group policy for volumes %s through virtual pool change to %s", volumeURIs, newVpoolURI));
VolumeVpoolChangeTaskCompleter taskCompleter = null;
URI oldVpoolURI = null;
List<Volume> volumes = new ArrayList<Volume>();
List<Volume> vplexBackendVolumes = new ArrayList<Volume>();
try {
// Get all CG source volumes. The entire CG policy is being updated so we
// need to capture the existing vpools for all the source volumes before
// changing them.
List<Volume> cgVolumes = RPHelper.getCgSourceVolumes(consistencyGroup, _dbClient);
VirtualPool newVpool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
Map<URI, URI> oldVpools = new HashMap<URI, URI>();
for (Volume volume : cgVolumes) {
// Save the old virtual pool
oldVpoolURI = volume.getVirtualPool();
oldVpools.put(volume.getId(), oldVpoolURI);
// Update to the new virtual pool
volume.setVirtualPool(newVpoolURI);
volumes.add(volume);
// If this is a VPlex volume, there will be associated backend volumes.
StringSet associatedVolumeIds = volume.getAssociatedVolumes();
// Perform additional tasks if this volume is a VPlex volume
if (associatedVolumeIds != null && !associatedVolumeIds.isEmpty()) {
Volume backendSrc = null;
Volume backendHa = null;
for (String associatedVolumeId : associatedVolumeIds) {
Volume associatedVolume = _dbClient.queryObject(Volume.class, URI.create(associatedVolumeId));
// Assign the associated volumes to either be the source or HA
if (associatedVolume != null) {
if (associatedVolume.getVirtualArray().equals(volume.getVirtualArray())) {
backendSrc = associatedVolume;
} else {
backendHa = associatedVolume;
}
}
}
if (backendSrc != null) {
// Change the back end volume's vPool too
backendSrc.setVirtualPool(newVpoolURI);
vplexBackendVolumes.add(backendSrc);
_log.info(String.format("Changing VirtualPool for VPLEX backend source volume %s (%s) from %s to %s", backendSrc.getLabel(), backendSrc.getId(), oldVpoolURI, newVpoolURI));
if (backendHa != null) {
VirtualPool newHAVpool = VirtualPool.getHAVPool(newVpool, _dbClient);
if (newHAVpool == null) {
// it may not be set
newHAVpool = newVpool;
}
backendHa.setVirtualPool(newHAVpool.getId());
vplexBackendVolumes.add(backendHa);
}
}
}
}
_dbClient.updateObject(volumes);
_dbClient.updateObject(vplexBackendVolumes);
// The VolumeVpoolChangeTaskCompleter will restore the old Virtual Pool
taskCompleter = new VolumeVpoolChangeTaskCompleter(volumeURIs, oldVpools, task);
} catch (Exception ex) {
_log.error("Unexpected exception reading volume or generating taskCompleter: ", ex);
ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
VolumeWorkflowCompleter completer = new VolumeWorkflowCompleter(volumeURIs, task);
completer.error(_dbClient, serviceError);
}
try {
Workflow workflow = _workflowService.getNewWorkflow(this, "updateReplicationMode", false, task);
ProtectionSystem protectionSystem = _dbClient.queryObject(ProtectionSystem.class, protectionDevice);
if (!volumes.isEmpty()) {
VirtualPool newVirtualPool = _dbClient.queryObject(VirtualPool.class, newVpoolURI);
// Add workflow step
addUpdateConsistencyGroupPolicyStep(workflow, protectionSystem, consistencyGroup, newVirtualPool.getRpCopyMode());
}
if (!workflow.getAllStepStatus().isEmpty()) {
_log.info("The updateAutoTieringPolicy workflow has {} step(s). Starting the workflow.", workflow.getAllStepStatus().size());
workflow.executePlan(taskCompleter, "Updated the consistency group policy successfully.");
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception ex) {
_log.error("Unexpected exception: ", ex);
ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
taskCompleter.error(_dbClient, serviceError);
}
}
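Two details above are easy to miss: the old vpool of every CG source volume is recorded in oldVpools so that VolumeVpoolChangeTaskCompleter can restore the originals on failure, and the VPlex backend volumes are split into source and HA legs by a single rule: the associated volume whose virtual array matches the virtual volume's is the source leg. A small sketch of that classification rule, using plain labels and URIs rather than the ViPR model:
import java.net.URI;
import java.util.*;
public class VplexLegSketch {
// Of the two backend (associated) volumes, the one whose virtual array matches
// the virtual volume's is the source leg; the other is the HA leg.
static Map<String, String> classify(URI vplexVarray, Map<String, URI> associated) {
Map<String, String> legs = new HashMap<String, String>();
for (Map.Entry<String, URI> entry : associated.entrySet()) {
legs.put(entry.getValue().equals(vplexVarray) ? "source" : "ha", entry.getKey());
}
return legs;
}
public static void main(String[] args) {
URI varray1 = URI.create("urn:varray:1");
URI varray2 = URI.create("urn:varray:2");
Map<String, URI> associated = new HashMap<String, URI>();
associated.put("backend-a", varray1);
associated.put("backend-b", varray2);
System.out.println(classify(varray1, associated)); // source=backend-a, ha=backend-b (map order may vary)
}
}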
use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
the class RPDeviceController method addPreRestoreVolumeSteps.
/**
* Adds the necessary RecoverPoint controller steps that need to be executed prior
* to restoring a volume from snapshot. The pre-restore step is required if we
* are restoring a native array snapshot of the following parent volumes:
* <ul>
* <li>A BlockSnapshot parent volume that is a regular RP source/target residing on a VMAX.</li>
* <li>A BlockSnapshot parent volume that is a backing volume to a VPlex distributed volume.</li>
* </ul>
*
* @param workflow
* the Workflow being constructed
* @param storageSystemURI
* the URI of storage controller
* @param volumeURI
* the URI of volume to be restored
* @param snapshotURI
* the URI of snapshot used for restoration
* @param taskId
* the top level operation's taskId
* @return A waitFor key that can be used by subsequent controllers to wait on
*/
public String addPreRestoreVolumeSteps(Workflow workflow, URI storageSystemURI, URI volumeURI, URI snapshotURI, String taskId) {
String waitFor = null;
BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, snapshotURI);
// Only consider native snapshots
if (snapshot != null && NullColumnValueGetter.isNotNullValue(snapshot.getTechnologyType()) && snapshot.getTechnologyType().equals(TechnologyType.NATIVE.toString())) {
Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
if (volume != null && storageSystem != null) {
boolean vplexDistBackingVolume = false;
URI cgId = volume.getConsistencyGroup();
Volume associatedVPlexVolume = Volume.fetchVplexVolume(_dbClient, volume);
if (associatedVPlexVolume != null && associatedVPlexVolume.getAssociatedVolumes() != null && associatedVPlexVolume.getAssociatedVolumes().size() == 2) {
vplexDistBackingVolume = true;
}
if (vplexDistBackingVolume) {
volume = associatedVPlexVolume;
}
// If the volume is RP-protected (and is either a VPlex distributed backing volume or resides
// on a VMAX), the replication set must be removed before performing the native block restore.
if (!NullColumnValueGetter.isNullURI(volume.getProtectionController()) && (vplexDistBackingVolume || (storageSystem != null && NullColumnValueGetter.isNotNullValue(storageSystem.getSystemType()) && storageSystem.getSystemType().equals(SystemType.vmax.toString())))) {
ProtectionSystem rpSystem = null;
rpSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
if (rpSystem == null) {
// Verify a non-null protection system was returned from the database client.
throw DeviceControllerExceptions.recoverpoint.failedConnectingForMonitoring(volume.getProtectionController());
}
List<URI> volumeURIs = getVolumesForRestore(snapshot, volume);
// Validate the replication sets for all volumes to restore. Must ensure the source
// volume size is not greater than the target volume size
List<Volume> volumes = _dbClient.queryObject(Volume.class, volumeURIs);
RPHelper.validateRSetVolumeSizes(_dbClient, volumes);
Map<String, RecreateReplicationSetRequestParams> rsetParams = new HashMap<String, RecreateReplicationSetRequestParams>();
// Lock CG
List<String> locks = new ArrayList<String>();
String lockName = ControllerLockingUtil.getConsistencyGroupStorageKey(_dbClient, cgId, rpSystem.getId());
if (null != lockName) {
locks.add(lockName);
acquireWorkflowLockOrThrow(workflow, locks);
}
for (URI volumeId : volumeURIs) {
Volume vol = _dbClient.queryObject(Volume.class, volumeId);
RecreateReplicationSetRequestParams rsetParam = getReplicationSettings(rpSystem, vol.getId());
rsetParams.put(RPHelper.getRPWWn(vol.getId(), _dbClient), rsetParam);
}
String stepId = workflow.createStepId();
Workflow.Method deleteRsetExecuteMethod = new Workflow.Method(METHOD_DELETE_RSET_STEP, rpSystem.getId(), volumeURIs);
Workflow.Method recreateRSetExecuteMethod = new Workflow.Method(METHOD_RECREATE_RSET_STEP, rpSystem.getId(), volumeURIs, rsetParams);
waitFor = workflow.createStep(STEP_PRE_VOLUME_RESTORE, "Pre volume restore from snapshot, delete replication set step for RP: " + volumeURI.toString(), null, rpSystem.getId(), rpSystem.getSystemType(), this.getClass(), deleteRsetExecuteMethod, recreateRSetExecuteMethod, stepId);
_log.info(String.format("Created workflow step to delete replication set for volume %s.", volume.getId().toString()));
}
}
}
return waitFor;
}
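The pre-restore step is only added when every guard passes: the snapshot is a native array snapshot, the parent volume has a protection controller, and the volume either backs a VPlex distributed volume or resides on a VMAX. A condensed, hypothetical boolean view of those guards:
public class PreRestoreGuardSketch {
// A condensed stand-in for the conditions that gate the delete/recreate
// replication-set step in addPreRestoreVolumeSteps.
static boolean needsPreRestoreStep(boolean nativeSnapshot, boolean rpProtected, boolean vplexDistBackingVolume, boolean onVmax) {
return nativeSnapshot && rpProtected && (vplexDistBackingVolume || onVmax);
}
public static void main(String[] args) {
// Native snapshot of an RP-protected VMAX volume: pre-restore step is added.
System.out.println(needsPreRestoreStep(true, true, false, true)); // true
// Non-native snapshot (e.g., an RP bookmark): no pre-restore step.
System.out.println(needsPreRestoreStep(false, true, true, true)); // false
}
}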
use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
the class RPDeviceController method addPostVolumeExpandSteps.
/**
 * RP-specific workflow steps that run after volume expansion are added here.
 * RP CG replication sets that were removed during the pre-expand are reconstructed with the new expanded volumes.
 *
 * @param workflow
 * @param waitFor
 * @param volumeDescriptors
 * @param taskId
 * @return
 * @throws WorkflowException
 */
public String addPostVolumeExpandSteps(Workflow workflow, String waitFor, List<VolumeDescriptor> volumeDescriptors, String taskId) throws WorkflowException {
// Get only the RP volumes from the descriptors.
List<VolumeDescriptor> volumeDescriptorsTypeFilter = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.RP_SOURCE, VolumeDescriptor.Type.RP_EXISTING_SOURCE, VolumeDescriptor.Type.RP_VPLEX_VIRT_SOURCE }, new VolumeDescriptor.Type[] {});
// If there are no RP volumes, just return
if (volumeDescriptorsTypeFilter.isEmpty()) {
return waitFor;
}
for (VolumeDescriptor descriptor : volumeDescriptorsTypeFilter) {
Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
Map<String, RecreateReplicationSetRequestParams> rsetParams = new HashMap<String, RecreateReplicationSetRequestParams>();
RecreateReplicationSetRequestParams rsetParam = getReplicationSettings(rpSystem, volume.getId());
rsetParams.put(RPHelper.getRPWWn(volume.getId(), _dbClient), rsetParam);
String stepId = workflow.createStepId();
Workflow.Method recreateRSetExecuteMethod = new Workflow.Method(METHOD_RECREATE_RSET_STEP, rpSystem.getId(), Arrays.asList(volume.getId()), rsetParams);
workflow.createStep(STEP_POST_VOLUME_EXPAND, "Post volume expand, recreate replication set subtask for RP: " + volume.getId().toString(), waitFor, rpSystem.getId(), rpSystem.getSystemType(), this.getClass(), recreateRSetExecuteMethod, null, stepId);
_log.info("Recreate Replication Set in workflow");
}
return STEP_POST_VOLUME_EXPAND;
}
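Together with addPreVolumeExpandSteps, this method brackets the expand: replication sets are deleted before the array grows the volume and recreated (keyed by volume WWN in rsetParams) afterwards. A toy illustration of the waitFor chaining between the three phases; the middle expand step group is a placeholder, not a CoprHD constant.
public class ExpandBracketSketch {
public static void main(String[] args) {
// Step groups chained by waitFor keys, mirroring the expand bracket.
String[][] plan = {
{ "STEP_PRE_VOLUME_EXPAND", null }, // delete replication sets
{ "BLOCK_EXPAND", "STEP_PRE_VOLUME_EXPAND" }, // array expands the volume (placeholder)
{ "STEP_POST_VOLUME_EXPAND", "BLOCK_EXPAND" } // recreate replication sets
};
for (String[] step : plan) {
System.out.println(step[0] + " waits on " + (step[1] == null ? "<nothing>" : step[1]));
}
}
}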
Aggregations